From 1f54d6aad963ed04fbffef162f41ead4f1a70f94 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 23 Aug 2021 13:59:17 +0800 Subject: [PATCH 01/71] [TD-6145]:support tag filtering with regex --- src/client/src/tscSQLParser.c | 168 ++-- src/common/inc/texpr.h | 2 + src/common/inc/tglobal.h | 1 + src/common/src/texpr.c | 11 + src/common/src/tglobal.c | 1 + src/inc/taosdef.h | 2 + src/inc/ttokendef.h | 306 +++---- src/query/inc/sql.y | 5 +- src/query/src/qFilter.c | 16 +- src/query/src/sql.c | 1520 ++++++++++++--------------------- src/tsdb/src/tsdbRead.c | 10 +- src/util/inc/tcompare.h | 2 + src/util/src/tcompare.c | 50 +- src/util/src/ttokenizer.c | 1 + 14 files changed, 889 insertions(+), 1206 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 612a3d4798..d26843bb17 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -279,6 +279,8 @@ static uint8_t convertRelationalOperator(SStrToken *pToken) { return TSDB_BINARY_OP_REMAINDER; case TK_LIKE: return TSDB_RELATION_LIKE; + case TK_MATCH: + return TSDB_RELATION_MATCH; case TK_ISNULL: return TSDB_RELATION_ISNULL; case TK_NOTNULL: @@ -3788,6 +3790,9 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, case TK_LIKE: pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE; break; + case TK_MATCH: + pColumnFilter->lowerRelOptr = TSDB_RELATION_MATCH; + break; case TK_ISNULL: pColumnFilter->lowerRelOptr = TSDB_RELATION_ISNULL; break; @@ -3851,9 +3856,15 @@ static int32_t tablenameListToString(tSqlExpr* pExpr, SStringBuilder* sb) { return TSDB_CODE_SUCCESS; } -static int32_t tablenameCondToString(tSqlExpr* pExpr, SStringBuilder* sb) { - taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); - taosStringBuilderAppendString(sb, pExpr->value.pz); +static int32_t tablenameCondToString(tSqlExpr* pExpr, uint32_t opToken, SStringBuilder* sb) { + assert(opToken == TK_LIKE || opToken == TK_MATCH); + if (opToken == TK_LIKE) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN); + taosStringBuilderAppendString(sb, pExpr->value.pz); + } else if (opToken == TK_MATCH) { + taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_MATCH, QUERY_COND_REL_PREFIX_MATCH_LEN); + taosStringBuilderAppendString(sb, pExpr->value.pz); + } return TSDB_CODE_SUCCESS; } @@ -3874,7 +3885,7 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex); int32_t ret = 0; - const char* msg1 = "non binary column not support like operator"; + const char* msg1 = "non binary column not support like/match operator"; const char* msg2 = "binary column not support this operator"; const char* msg3 = "bool column not support this operator"; const char* msg4 = "primary key not support this operator"; @@ -3902,12 +3913,13 @@ static int32_t checkColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCol && pExpr->tokenId != TK_ISNULL && pExpr->tokenId != TK_NOTNULL && pExpr->tokenId != TK_LIKE + && pExpr->tokenId != TK_MATCH && pExpr->tokenId != TK_IN) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); goto _err_ret; } } else { - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { ret = invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); goto _err_ret; } @@ -3955,12 +3967,12 @@ static int32_t 
getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* if (pTableCond->tokenId == TK_IN) { ret = tablenameListToString(pRight, sb); - } else if (pTableCond->tokenId == TK_LIKE) { + } else if (pTableCond->tokenId == TK_LIKE || pTableCond->tokenId == TK_MATCH) { if (pRight->tokenId != TK_STRING) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - ret = tablenameCondToString(pRight, sb); + ret = tablenameCondToString(pRight, pTableCond->tokenId, sb); } if (ret != TSDB_CODE_SUCCESS) { @@ -4409,7 +4421,7 @@ static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr } static bool validTableNameOptr(tSqlExpr* pExpr) { - const char nameFilterOptr[] = {TK_IN, TK_LIKE}; + const char nameFilterOptr[] = {TK_IN, TK_LIKE, TK_MATCH}; for (int32_t i = 0; i < tListLen(nameFilterOptr); ++i) { if (pExpr->tokenId == nameFilterOptr[i]) { @@ -4501,6 +4513,44 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t return TSDB_CODE_SUCCESS; } +// check for match expression +static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { + const char* msg1 = "regular expression string should be less than %d characters"; + const char* msg2 = "illegal column type for match"; + const char* msg3 = "invalid regular expression"; + + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + + if (pExpr->tokenId == TK_MATCH) { + if (pRight->value.nLen > tsMaxRegexStringLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxRegexStringLen); + return invalidOperationMsg(msgBuf, tmp); + } + + SSchema* pSchema = tscGetTableSchema(pTableMeta); + if ((!isTablenameToken(&pLeft->columnName)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { + return invalidOperationMsg(msgBuf, msg2); + } + + int errCode = 0; + regex_t regex; + char regErrBuf[256] = {0}; + + const char* pattern = pRight->value.pz; + int cflags = REG_EXTENDED | REG_ICASE; + if ((errCode = regcomp(®ex, pattern, cflags)) != 0) { + regerror(errCode, ®ex, regErrBuf, sizeof(regErrBuf)); + tscError("Failed to compile regex pattern %s. 
reason %s", pattern, regErrBuf); + return invalidOperationMsg(msgBuf, msg3); + } + } + + return TSDB_CODE_SUCCESS; +} + + int32_t handleNeOptr(tSqlExpr** rexpr, tSqlExpr* expr) { tSqlExpr* left = tSqlExprClone(expr); tSqlExpr* right = expr; @@ -4552,6 +4602,12 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return code; } + // validate the match expression + code = validateMatchExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, index.columnIndex); if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP && index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { @@ -4879,65 +4935,66 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, STagCond* pTagCond = &pQueryInfo->tagCond; pTagCond->tbnameCond.uid = pTableMetaInfo->pTableMeta->id.uid; - assert(pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_IN); + assert(pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_IN); - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { char* str = taosStringBuilderGetResult(sb, NULL); pQueryInfo->tagCond.tbnameCond.cond = strdup(str); pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str); return TSDB_CODE_SUCCESS; - } + } else { + SStringBuilder sb1; + memset(&sb1, 0, sizeof(sb1)); + taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); - SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1)); - taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN); + // remove the duplicated input table names + int32_t num = 0; + char* tableNameString = taosStringBuilderGetResult(sb, NULL); - // remove the duplicated input table names - int32_t num = 0; - char* tableNameString = taosStringBuilderGetResult(sb, NULL); + char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); + qsort(segments, num, POINTER_BYTES, tableNameCompar); - char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num); - qsort(segments, num, POINTER_BYTES, tableNameCompar); - - int32_t j = 1; - for (int32_t i = 1; i < num; ++i) { - if (strcmp(segments[i], segments[i - 1]) != 0) { - segments[j++] = segments[i]; + int32_t j = 1; + for (int32_t i = 1; i < num; ++i) { + if (strcmp(segments[i], segments[i - 1]) != 0) { + segments[j++] = segments[i]; + } } - } - num = j; + num = j; - char name[TSDB_DB_NAME_LEN] = {0}; - tNameGetDbName(&pTableMetaInfo->name, name); - SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) }; - - for (int32_t i = 0; i < num; ++i) { - if (i >= 1) { - taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); + char name[TSDB_DB_NAME_LEN] = {0}; + tNameGetDbName(&pTableMetaInfo->name, name); + SStrToken dbToken = {.type = TK_STRING, .z = name, .n = (uint32_t)strlen(name)}; + + for (int32_t i = 0; i < num; ++i) { + if (i >= 1) { + taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1); + } + + char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; + int32_t xlen = (int32_t)strlen(segments[i]); + SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; + + int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); + if (ret != TSDB_CODE_SUCCESS) { + taosStringBuilderDestroy(&sb1); + tfree(segments); 
+ + invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); + return ret; + } + + taosStringBuilderAppendString(&sb1, idBuf); } - char idBuf[TSDB_TABLE_FNAME_LEN] = {0}; - int32_t xlen = (int32_t)strlen(segments[i]); - SStrToken t = {.z = segments[i], .n = xlen, .type = TK_STRING}; + char* str = taosStringBuilderGetResult(&sb1, NULL); + pQueryInfo->tagCond.tbnameCond.cond = strdup(str); + pQueryInfo->tagCond.tbnameCond.len = (int32_t)strlen(str); - int32_t ret = setObjFullName(idBuf, account, &dbToken, &t, &xlen); - if (ret != TSDB_CODE_SUCCESS) { - taosStringBuilderDestroy(&sb1); - tfree(segments); - - invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); - return ret; - } - - taosStringBuilderAppendString(&sb1, idBuf); + taosStringBuilderDestroy(&sb1); + tfree(segments); + return TSDB_CODE_SUCCESS; } - - char* str = taosStringBuilderGetResult(&sb1, NULL); - pQueryInfo->tagCond.tbnameCond.cond = strdup(str); - pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str); - - taosStringBuilderDestroy(&sb1); - tfree(segments); - return TSDB_CODE_SUCCESS; } int32_t mergeTimeRange(SSqlCmd* pCmd, STimeWindow* res, STimeWindow* win, int32_t optr) { @@ -8114,7 +8171,7 @@ int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelect } static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pSelectNodeList, tSqlExpr* pExpr, int32_t sqlOptr) { - const char* msg1 = "non binary column not support like operator"; + const char* msg1 = "non binary column not support like/match operator"; const char* msg2 = "invalid operator for binary column in having clause"; const char* msg3 = "invalid operator for bool column in having clause"; @@ -8166,11 +8223,12 @@ static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, S && pExpr->tokenId != TK_ISNULL && pExpr->tokenId != TK_NOTNULL && pExpr->tokenId != TK_LIKE + && pExpr->tokenId != TK_MATCH ) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { - if (pExpr->tokenId == TK_LIKE) { + if (pExpr->tokenId == TK_LIKE || pExpr->tokenId == TK_MATCH) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } diff --git a/src/common/inc/texpr.h b/src/common/inc/texpr.h index 2e49a69366..db71559df6 100644 --- a/src/common/inc/texpr.h +++ b/src/common/inc/texpr.h @@ -33,9 +33,11 @@ struct SSchema; #define QUERY_COND_REL_PREFIX_IN "IN|" #define QUERY_COND_REL_PREFIX_LIKE "LIKE|" +#define QUERY_COND_REL_PREFIX_MATCH "MATCH|" #define QUERY_COND_REL_PREFIX_IN_LEN 3 #define QUERY_COND_REL_PREFIX_LIKE_LEN 5 +#define QUERY_COND_REL_PREFIX_MATCH_LEN 6 typedef bool (*__result_filter_fn_t)(const void *, void *); typedef void (*__do_filter_suppl_fn_t)(void *, void *); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 4b8347ead0..c40607bf02 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -74,6 +74,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; extern int32_t tsMaxWildCardsLen; +extern int32_t tsMaxRegexStringLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index ebdb33fd5b..2c72b7bd59 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -430,6 +430,17 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) { pVal->nType = TSDB_DATA_TYPE_BINARY; pVal->nLen = (int32_t)len; + } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_MATCH, 
QUERY_COND_REL_PREFIX_MATCH_LEN) == 0) { + right->nodeType = TSQL_NODE_VALUE; + expr->_node.optr = TSDB_RELATION_MATCH; + tVariant* pVal = exception_calloc(1, sizeof(tVariant)); + right->pVal = pVal; + size_t len = strlen(tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN) + 1; + pVal->pz = exception_malloc(len); + memcpy(pVal->pz, tbnameCond + QUERY_COND_REL_PREFIX_MATCH_LEN, len); + pVal->nType = TSDB_DATA_TYPE_BINARY; + pVal->nLen = (int32_t)len; + } else if (strncmp(tbnameCond, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN) == 0) { right->nodeType = TSQL_NODE_VALUE; expr->_node.optr = TSDB_RELATION_IN; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index f169b07bb2..795f8b38d3 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -85,6 +85,7 @@ int32_t tsCompressColData = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; +int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_MAX_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 44b3a2cf0d..b4d60aeba5 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -164,6 +164,8 @@ do { \ #define TSDB_RELATION_OR 12 #define TSDB_RELATION_NOT 13 +#define TSDB_RELATION_MATCH 14 + #define TSDB_BINARY_OP_ADD 30 #define TSDB_BINARY_OP_SUBTRACT 31 #define TSDB_BINARY_OP_MULTIPLY 32 diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index a97de15e93..ffbe09b2c5 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -37,159 +37,159 @@ #define TK_NOTNULL 19 #define TK_IS 20 #define TK_LIKE 21 -#define TK_GLOB 22 -#define TK_BETWEEN 23 -#define TK_IN 24 -#define TK_GT 25 -#define TK_GE 26 -#define TK_LT 27 -#define TK_LE 28 -#define TK_BITAND 29 -#define TK_BITOR 30 -#define TK_LSHIFT 31 -#define TK_RSHIFT 32 -#define TK_PLUS 33 -#define TK_MINUS 34 -#define TK_DIVIDE 35 -#define TK_TIMES 36 -#define TK_STAR 37 -#define TK_SLASH 38 -#define TK_REM 39 -#define TK_CONCAT 40 -#define TK_UMINUS 41 -#define TK_UPLUS 42 -#define TK_BITNOT 43 -#define TK_SHOW 44 -#define TK_DATABASES 45 -#define TK_TOPICS 46 -#define TK_FUNCTIONS 47 -#define TK_MNODES 48 -#define TK_DNODES 49 -#define TK_ACCOUNTS 50 -#define TK_USERS 51 -#define TK_MODULES 52 -#define TK_QUERIES 53 -#define TK_CONNECTIONS 54 -#define TK_STREAMS 55 -#define TK_VARIABLES 56 -#define TK_SCORES 57 -#define TK_GRANTS 58 -#define TK_VNODES 59 -#define TK_DOT 60 -#define TK_CREATE 61 -#define TK_TABLE 62 -#define TK_STABLE 63 -#define TK_DATABASE 64 -#define TK_TABLES 65 -#define TK_STABLES 66 -#define TK_VGROUPS 67 -#define TK_DROP 68 -#define TK_TOPIC 69 -#define TK_FUNCTION 70 -#define TK_DNODE 71 -#define TK_USER 72 -#define TK_ACCOUNT 73 -#define TK_USE 74 -#define TK_DESCRIBE 75 -#define TK_ALTER 76 -#define TK_PASS 77 -#define TK_PRIVILEGE 78 -#define TK_LOCAL 79 -#define TK_COMPACT 80 -#define TK_LP 81 -#define TK_RP 82 -#define TK_IF 83 -#define TK_EXISTS 84 -#define TK_AS 85 -#define TK_OUTPUTTYPE 86 -#define TK_AGGREGATE 87 -#define TK_BUFSIZE 88 -#define TK_PPS 89 -#define TK_TSERIES 90 -#define TK_DBS 91 -#define TK_STORAGE 92 -#define TK_QTIME 93 -#define TK_CONNS 94 -#define TK_STATE 95 -#define TK_COMMA 96 -#define TK_KEEP 97 -#define TK_CACHE 98 -#define TK_REPLICA 99 -#define TK_QUORUM 100 -#define TK_DAYS 101 -#define TK_MINROWS 102 -#define TK_MAXROWS 103 -#define TK_BLOCKS 104 -#define TK_CTIME 105 -#define TK_WAL 106 -#define 
TK_FSYNC 107 -#define TK_COMP 108 -#define TK_PRECISION 109 -#define TK_UPDATE 110 -#define TK_CACHELAST 111 -#define TK_PARTITIONS 112 -#define TK_UNSIGNED 113 -#define TK_TAGS 114 -#define TK_USING 115 -#define TK_NULL 116 -#define TK_NOW 117 -#define TK_SELECT 118 -#define TK_UNION 119 -#define TK_ALL 120 -#define TK_DISTINCT 121 -#define TK_FROM 122 -#define TK_VARIABLE 123 -#define TK_INTERVAL 124 -#define TK_SESSION 125 -#define TK_STATE_WINDOW 126 -#define TK_FILL 127 -#define TK_SLIDING 128 -#define TK_ORDER 129 -#define TK_BY 130 -#define TK_ASC 131 -#define TK_DESC 132 -#define TK_GROUP 133 -#define TK_HAVING 134 -#define TK_LIMIT 135 -#define TK_OFFSET 136 -#define TK_SLIMIT 137 -#define TK_SOFFSET 138 -#define TK_WHERE 139 -#define TK_RESET 140 -#define TK_QUERY 141 -#define TK_SYNCDB 142 -#define TK_ADD 143 -#define TK_COLUMN 144 -#define TK_MODIFY 145 -#define TK_TAG 146 -#define TK_CHANGE 147 -#define TK_SET 148 -#define TK_KILL 149 -#define TK_CONNECTION 150 -#define TK_STREAM 151 -#define TK_COLON 152 -#define TK_ABORT 153 -#define TK_AFTER 154 -#define TK_ATTACH 155 -#define TK_BEFORE 156 -#define TK_BEGIN 157 -#define TK_CASCADE 158 -#define TK_CLUSTER 159 -#define TK_CONFLICT 160 -#define TK_COPY 161 -#define TK_DEFERRED 162 -#define TK_DELIMITERS 163 -#define TK_DETACH 164 -#define TK_EACH 165 -#define TK_END 166 -#define TK_EXPLAIN 167 -#define TK_FAIL 168 -#define TK_FOR 169 -#define TK_IGNORE 170 -#define TK_IMMEDIATE 171 -#define TK_INITIALLY 172 -#define TK_INSTEAD 173 -#define TK_MATCH 174 +#define TK_MATCH 22 +#define TK_GLOB 23 +#define TK_BETWEEN 24 +#define TK_IN 25 +#define TK_GT 26 +#define TK_GE 27 +#define TK_LT 28 +#define TK_LE 29 +#define TK_BITAND 30 +#define TK_BITOR 31 +#define TK_LSHIFT 32 +#define TK_RSHIFT 33 +#define TK_PLUS 34 +#define TK_MINUS 35 +#define TK_DIVIDE 36 +#define TK_TIMES 37 +#define TK_STAR 38 +#define TK_SLASH 39 +#define TK_REM 40 +#define TK_CONCAT 41 +#define TK_UMINUS 42 +#define TK_UPLUS 43 +#define TK_BITNOT 44 +#define TK_SHOW 45 +#define TK_DATABASES 46 +#define TK_TOPICS 47 +#define TK_FUNCTIONS 48 +#define TK_MNODES 49 +#define TK_DNODES 50 +#define TK_ACCOUNTS 51 +#define TK_USERS 52 +#define TK_MODULES 53 +#define TK_QUERIES 54 +#define TK_CONNECTIONS 55 +#define TK_STREAMS 56 +#define TK_VARIABLES 57 +#define TK_SCORES 58 +#define TK_GRANTS 59 +#define TK_VNODES 60 +#define TK_DOT 61 +#define TK_CREATE 62 +#define TK_TABLE 63 +#define TK_STABLE 64 +#define TK_DATABASE 65 +#define TK_TABLES 66 +#define TK_STABLES 67 +#define TK_VGROUPS 68 +#define TK_DROP 69 +#define TK_TOPIC 70 +#define TK_FUNCTION 71 +#define TK_DNODE 72 +#define TK_USER 73 +#define TK_ACCOUNT 74 +#define TK_USE 75 +#define TK_DESCRIBE 76 +#define TK_ALTER 77 +#define TK_PASS 78 +#define TK_PRIVILEGE 79 +#define TK_LOCAL 80 +#define TK_COMPACT 81 +#define TK_LP 82 +#define TK_RP 83 +#define TK_IF 84 +#define TK_EXISTS 85 +#define TK_AS 86 +#define TK_OUTPUTTYPE 87 +#define TK_AGGREGATE 88 +#define TK_BUFSIZE 89 +#define TK_PPS 90 +#define TK_TSERIES 91 +#define TK_DBS 92 +#define TK_STORAGE 93 +#define TK_QTIME 94 +#define TK_CONNS 95 +#define TK_STATE 96 +#define TK_COMMA 97 +#define TK_KEEP 98 +#define TK_CACHE 99 +#define TK_REPLICA 100 +#define TK_QUORUM 101 +#define TK_DAYS 102 +#define TK_MINROWS 103 +#define TK_MAXROWS 104 +#define TK_BLOCKS 105 +#define TK_CTIME 106 +#define TK_WAL 107 +#define TK_FSYNC 108 +#define TK_COMP 109 +#define TK_PRECISION 110 +#define TK_UPDATE 111 +#define TK_CACHELAST 112 +#define TK_PARTITIONS 113 +#define 
TK_UNSIGNED 114 +#define TK_TAGS 115 +#define TK_USING 116 +#define TK_NULL 117 +#define TK_NOW 118 +#define TK_SELECT 119 +#define TK_UNION 120 +#define TK_ALL 121 +#define TK_DISTINCT 122 +#define TK_FROM 123 +#define TK_VARIABLE 124 +#define TK_INTERVAL 125 +#define TK_SESSION 126 +#define TK_STATE_WINDOW 127 +#define TK_FILL 128 +#define TK_SLIDING 129 +#define TK_ORDER 130 +#define TK_BY 131 +#define TK_ASC 132 +#define TK_DESC 133 +#define TK_GROUP 134 +#define TK_HAVING 135 +#define TK_LIMIT 136 +#define TK_OFFSET 137 +#define TK_SLIMIT 138 +#define TK_SOFFSET 139 +#define TK_WHERE 140 +#define TK_RESET 141 +#define TK_QUERY 142 +#define TK_SYNCDB 143 +#define TK_ADD 144 +#define TK_COLUMN 145 +#define TK_MODIFY 146 +#define TK_TAG 147 +#define TK_CHANGE 148 +#define TK_SET 149 +#define TK_KILL 150 +#define TK_CONNECTION 151 +#define TK_STREAM 152 +#define TK_COLON 153 +#define TK_ABORT 154 +#define TK_AFTER 155 +#define TK_ATTACH 156 +#define TK_BEFORE 157 +#define TK_BEGIN 158 +#define TK_CASCADE 159 +#define TK_CLUSTER 160 +#define TK_CONFLICT 161 +#define TK_COPY 162 +#define TK_DEFERRED 163 +#define TK_DELIMITERS 164 +#define TK_DETACH 165 +#define TK_EACH 166 +#define TK_END 167 +#define TK_EXPLAIN 168 +#define TK_FAIL 169 +#define TK_FOR 170 +#define TK_IGNORE 171 +#define TK_IMMEDIATE 172 +#define TK_INITIALLY 173 +#define TK_INSTEAD 174 #define TK_KEY 175 #define TK_OF 176 #define TK_RAISE 177 diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 8b43e55693..82361523ed 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -11,7 +11,7 @@ %left OR. %left AND. %right NOT. -%left EQ NE ISNULL NOTNULL IS LIKE GLOB BETWEEN IN. +%left EQ NE ISNULL NOTNULL IS LIKE MATCH GLOB BETWEEN IN. %left GT GE LT LE. %left BITAND BITOR LSHIFT RSHIFT. %left PLUS MINUS. @@ -743,6 +743,9 @@ expr(A) ::= expr(X) REM expr(Y). {A = tSqlExprCreate(X, Y, TK_REM); } // like expression expr(A) ::= expr(X) LIKE expr(Y). {A = tSqlExprCreate(X, Y, TK_LIKE); } +// match expression +expr(A) ::= expr(X) MATCH expr(Y). {A = tSqlExprCreate(X, Y, TK_MATCH); } + //in expression expr(A) ::= expr(X) IN LP exprlist(Y) RP. 
{A = tSqlExprCreate(X, (tSqlExpr*)Y, TK_IN); } diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c index 72f8376af6..a6988d7adc 100644 --- a/src/query/src/qFilter.c +++ b/src/query/src/qFilter.c @@ -28,6 +28,7 @@ OptrStr gOptrStr[] = { {TSDB_RELATION_GREATER_EQUAL, ">="}, {TSDB_RELATION_NOT_EQUAL, "!="}, {TSDB_RELATION_LIKE, "like"}, + {TSDB_RELATION_MATCH, "match"}, {TSDB_RELATION_ISNULL, "is null"}, {TSDB_RELATION_NOTNULL, "not null"}, {TSDB_RELATION_IN, "in"}, @@ -156,7 +157,7 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal, compareDoubleVal, compareLenPrefixedStr, compareStrPatternComp, compareFindItemInSet, compareWStrPatternComp, compareLenPrefixedWStr, compareUint8Val, compareUint16Val, compareUint32Val, compareUint64Val, - setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8 + setCompareBytes1, setCompareBytes2, setCompareBytes4, setCompareBytes8, compareStrRegexComp, }; int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { @@ -195,7 +196,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_FLOAT: comparFn = 4; break; case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break; case TSDB_DATA_TYPE_BINARY: { - if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ + if (optr == TSDB_RELATION_MATCH) { + comparFn = 19; + } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */ comparFn = 7; } else if (optr == TSDB_RELATION_IN) { comparFn = 8; @@ -207,7 +210,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { } case TSDB_DATA_TYPE_NCHAR: { - if (optr == TSDB_RELATION_LIKE) { + if (optr == TSDB_RELATION_MATCH) { + comparFn = 19; + } else if (optr == TSDB_RELATION_LIKE) { comparFn = 9; } else if (optr == TSDB_RELATION_IN) { comparFn = 8; @@ -1871,6 +1876,9 @@ bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right) case TSDB_RELATION_LIKE: { return ret == 0; } + case TSDB_RELATION_MATCH: { + return ret == 0; + } case TSDB_RELATION_IN: { return ret == 1; } @@ -2641,7 +2649,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SDataStatis *pDataStatis, int32_t } if (cunit->optr == TSDB_RELATION_ISNULL || cunit->optr == TSDB_RELATION_NOTNULL - || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE + || cunit->optr == TSDB_RELATION_IN || cunit->optr == TSDB_RELATION_LIKE || cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NOT_EQUAL) { continue; } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 09be4c0cf0..56a8877b34 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -25,7 +25,6 @@ #include #include /************ Begin %include sections from the grammar ************************/ -#line 23 "sql.y" #include #include @@ -38,7 +37,6 @@ #include "ttokendef.h" #include "tutil.h" #include "tvariant.h" -#line 42 "sql.c" /**************** End of %include directives **********************************/ /* These constants specify the various numeric values for terminal symbols ** in a format understandable to "makeheaders". 
This section is blank unless @@ -139,18 +137,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 362 -#define YYNRULE 289 -#define YYNRULE_WITH_ACTION 289 +#define YYNSTATE 364 +#define YYNRULE 290 +#define YYNRULE_WITH_ACTION 290 #define YYNTOKEN 195 -#define YY_MAX_SHIFT 361 -#define YY_MIN_SHIFTREDUCE 567 -#define YY_MAX_SHIFTREDUCE 855 -#define YY_ERROR_ACTION 856 -#define YY_ACCEPT_ACTION 857 -#define YY_NO_ACTION 858 -#define YY_MIN_REDUCE 859 -#define YY_MAX_REDUCE 1147 +#define YY_MAX_SHIFT 363 +#define YY_MIN_SHIFTREDUCE 569 +#define YY_MAX_SHIFTREDUCE 858 +#define YY_ERROR_ACTION 859 +#define YY_ACCEPT_ACTION 860 +#define YY_NO_ACTION 861 +#define YY_MIN_REDUCE 862 +#define YY_MAX_REDUCE 1151 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -217,163 +215,164 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (754) +#define YY_ACTTAB_COUNT (761) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 207, 618, 246, 618, 618, 245, 360, 229, 160, 619, - /* 10 */ 1123, 619, 619, 56, 57, 1036, 60, 61, 857, 361, - /* 20 */ 249, 50, 618, 59, 318, 64, 62, 65, 63, 984, - /* 30 */ 619, 982, 983, 55, 54, 160, 985, 53, 52, 51, - /* 40 */ 986, 153, 987, 988, 356, 945, 654, 568, 569, 570, - /* 50 */ 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, - /* 60 */ 581, 151, 207, 230, 907, 207, 56, 57, 1027, 60, - /* 70 */ 61, 189, 1124, 249, 50, 1124, 59, 318, 64, 62, - /* 80 */ 65, 63, 1072, 1033, 271, 79, 55, 54, 3, 190, - /* 90 */ 53, 52, 51, 56, 57, 250, 60, 61, 702, 1027, - /* 100 */ 249, 50, 29, 59, 318, 64, 62, 65, 63, 91, - /* 110 */ 278, 277, 37, 55, 54, 232, 94, 53, 52, 51, - /* 120 */ 235, 120, 114, 125, 1014, 241, 338, 337, 124, 1014, - /* 130 */ 130, 133, 123, 56, 58, 794, 60, 61, 127, 85, - /* 140 */ 249, 50, 92, 59, 318, 64, 62, 65, 63, 997, - /* 150 */ 998, 34, 1001, 55, 54, 207, 80, 53, 52, 51, - /* 160 */ 57, 1010, 60, 61, 316, 1124, 249, 50, 263, 59, - /* 170 */ 318, 64, 62, 65, 63, 37, 44, 267, 266, 55, - /* 180 */ 54, 348, 243, 53, 52, 51, 1014, 160, 43, 314, - /* 190 */ 355, 354, 313, 312, 311, 353, 310, 309, 308, 352, - /* 200 */ 307, 351, 350, 976, 964, 965, 966, 967, 968, 969, - /* 210 */ 970, 971, 972, 973, 974, 975, 977, 978, 60, 61, - /* 220 */ 231, 160, 249, 50, 1011, 59, 318, 64, 62, 65, - /* 230 */ 63, 1008, 1027, 24, 258, 55, 54, 1000, 97, 53, - /* 240 */ 52, 51, 252, 248, 809, 175, 1013, 798, 233, 801, - /* 250 */ 210, 804, 248, 809, 1143, 917, 798, 216, 801, 292, - /* 260 */ 804, 90, 189, 135, 134, 215, 258, 55, 54, 323, - /* 270 */ 85, 53, 52, 51, 1002, 227, 228, 176, 242, 319, - /* 280 */ 5, 40, 179, 258, 227, 228, 23, 178, 103, 108, - /* 290 */ 99, 107, 204, 726, 1012, 1073, 723, 290, 724, 908, - /* 300 */ 725, 64, 62, 65, 63, 303, 189, 44, 257, 55, - /* 310 */ 54, 37, 37, 53, 52, 51, 800, 253, 803, 251, - /* 320 */ 316, 326, 325, 66, 254, 255, 198, 196, 194, 270, - /* 330 */ 37, 77, 66, 193, 139, 138, 137, 136, 223, 742, - /* 340 */ 799, 43, 802, 355, 354, 37, 37, 37, 353, 53, - /* 350 */ 52, 51, 352, 37, 351, 350, 239, 240, 810, 805, - /* 360 */ 1011, 1011, 272, 78, 37, 806, 122, 810, 805, 37, - /* 370 */ 37, 359, 358, 144, 806, 327, 38, 14, 348, 1011, - /* 380 */ 82, 93, 70, 259, 739, 256, 320, 333, 332, 83, - /* 390 */ 328, 329, 330, 73, 1011, 1011, 1011, 999, 334, 150, - /* 
400 */ 148, 147, 1011, 1, 177, 775, 776, 727, 728, 335, - /* 410 */ 9, 96, 796, 1011, 336, 340, 758, 274, 1011, 1011, - /* 420 */ 1083, 766, 767, 746, 71, 712, 274, 295, 714, 297, - /* 430 */ 155, 713, 33, 74, 807, 67, 26, 830, 811, 38, - /* 440 */ 247, 38, 67, 95, 76, 67, 617, 16, 797, 15, - /* 450 */ 205, 25, 25, 113, 18, 112, 17, 731, 808, 732, - /* 460 */ 25, 6, 729, 211, 730, 298, 20, 119, 19, 118, - /* 470 */ 22, 1120, 21, 132, 131, 1119, 701, 1118, 225, 226, - /* 480 */ 208, 209, 1135, 212, 206, 213, 214, 813, 218, 219, - /* 490 */ 220, 217, 203, 1082, 237, 1079, 1078, 238, 339, 47, - /* 500 */ 1028, 268, 152, 1065, 1064, 1035, 149, 275, 1009, 279, - /* 510 */ 1046, 1043, 1044, 1048, 154, 159, 286, 171, 172, 273, - /* 520 */ 234, 1007, 173, 162, 174, 922, 300, 301, 302, 305, - /* 530 */ 306, 757, 1025, 45, 281, 201, 161, 283, 41, 317, - /* 540 */ 75, 916, 293, 72, 49, 324, 164, 1142, 291, 110, - /* 550 */ 1141, 1138, 163, 289, 180, 331, 1134, 116, 1133, 1130, - /* 560 */ 287, 285, 181, 942, 42, 39, 46, 202, 282, 904, - /* 570 */ 126, 902, 128, 129, 900, 899, 260, 280, 192, 897, - /* 580 */ 896, 895, 894, 893, 892, 891, 195, 197, 888, 886, - /* 590 */ 884, 882, 199, 48, 879, 200, 875, 304, 349, 81, - /* 600 */ 86, 284, 1066, 121, 341, 342, 343, 344, 345, 346, - /* 610 */ 347, 357, 855, 262, 261, 854, 224, 244, 299, 264, - /* 620 */ 265, 853, 836, 221, 222, 835, 269, 294, 104, 921, - /* 630 */ 920, 274, 105, 10, 276, 87, 84, 898, 734, 140, - /* 640 */ 30, 156, 141, 184, 890, 183, 943, 182, 185, 187, - /* 650 */ 186, 142, 188, 2, 889, 759, 143, 881, 980, 165, - /* 660 */ 880, 166, 167, 944, 168, 169, 170, 4, 990, 768, - /* 670 */ 157, 158, 762, 88, 236, 764, 89, 288, 31, 11, - /* 680 */ 32, 12, 13, 27, 296, 28, 96, 101, 98, 35, - /* 690 */ 100, 632, 36, 667, 102, 665, 664, 663, 661, 660, - /* 700 */ 659, 656, 622, 315, 106, 7, 321, 812, 322, 8, - /* 710 */ 814, 109, 111, 68, 69, 38, 704, 703, 115, 700, - /* 720 */ 117, 648, 646, 638, 644, 640, 642, 636, 634, 670, - /* 730 */ 669, 668, 666, 662, 658, 657, 191, 620, 585, 859, - /* 740 */ 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, - /* 750 */ 858, 858, 145, 146, + /* 0 */ 208, 620, 237, 620, 362, 231, 1017, 1030, 243, 621, + /* 10 */ 1127, 621, 1017, 57, 58, 37, 61, 62, 656, 1039, + /* 20 */ 251, 51, 50, 234, 60, 320, 65, 63, 66, 64, + /* 30 */ 1030, 802, 245, 805, 56, 55, 1017, 23, 54, 53, + /* 40 */ 52, 57, 58, 620, 61, 62, 235, 260, 251, 51, + /* 50 */ 50, 621, 60, 320, 65, 63, 66, 64, 176, 154, + /* 60 */ 233, 205, 56, 55, 1014, 248, 54, 53, 52, 979, + /* 70 */ 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, + /* 80 */ 977, 978, 980, 981, 29, 318, 80, 1036, 570, 571, + /* 90 */ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, + /* 100 */ 582, 583, 152, 98, 232, 57, 58, 37, 61, 62, + /* 110 */ 247, 796, 251, 51, 50, 350, 60, 320, 65, 63, + /* 120 */ 66, 64, 54, 53, 52, 208, 56, 55, 280, 279, + /* 130 */ 54, 53, 52, 57, 59, 1128, 61, 62, 74, 1005, + /* 140 */ 251, 51, 50, 620, 60, 320, 65, 63, 66, 64, + /* 150 */ 809, 621, 241, 206, 56, 55, 1014, 1003, 54, 53, + /* 160 */ 52, 58, 161, 61, 62, 212, 260, 251, 51, 50, + /* 170 */ 208, 60, 320, 65, 63, 66, 64, 177, 75, 252, + /* 180 */ 1128, 56, 55, 161, 213, 54, 53, 52, 61, 62, + /* 190 */ 860, 363, 251, 51, 50, 265, 60, 320, 65, 63, + /* 200 */ 66, 64, 704, 1011, 269, 268, 56, 55, 358, 948, + /* 210 */ 54, 53, 52, 43, 316, 357, 356, 315, 314, 313, + /* 220 */ 355, 312, 311, 310, 354, 309, 353, 352, 24, 250, + /* 230 */ 811, 340, 339, 800, 95, 803, 1076, 806, 292, 208, + /* 240 
*/ 1030, 318, 86, 260, 1123, 211, 37, 250, 811, 1128, + /* 250 */ 244, 800, 218, 803, 1015, 806, 273, 1075, 136, 135, + /* 260 */ 217, 1122, 229, 230, 325, 86, 321, 1000, 1001, 34, + /* 270 */ 1004, 1121, 37, 37, 274, 5, 40, 180, 37, 44, + /* 280 */ 229, 230, 179, 104, 109, 100, 108, 728, 38, 123, + /* 290 */ 725, 242, 726, 37, 727, 1014, 801, 161, 804, 37, + /* 300 */ 305, 350, 44, 65, 63, 66, 64, 910, 161, 14, + /* 310 */ 67, 56, 55, 94, 190, 54, 53, 52, 329, 256, + /* 320 */ 257, 1013, 1014, 330, 272, 227, 78, 1014, 67, 920, + /* 330 */ 121, 115, 126, 225, 254, 37, 190, 125, 331, 131, + /* 340 */ 134, 124, 1014, 97, 332, 812, 807, 128, 1014, 199, + /* 350 */ 197, 195, 808, 259, 1, 178, 194, 140, 139, 138, + /* 360 */ 137, 56, 55, 812, 807, 54, 53, 52, 911, 294, + /* 370 */ 808, 91, 37, 37, 43, 190, 357, 356, 37, 93, + /* 380 */ 336, 355, 92, 744, 1014, 354, 322, 353, 352, 361, + /* 390 */ 360, 145, 79, 81, 987, 71, 985, 986, 151, 149, + /* 400 */ 148, 988, 729, 730, 83, 989, 1016, 990, 991, 255, + /* 410 */ 741, 253, 84, 328, 327, 810, 228, 337, 338, 3, + /* 420 */ 191, 1014, 1014, 342, 777, 778, 1002, 1014, 261, 760, + /* 430 */ 258, 768, 335, 334, 769, 33, 9, 72, 714, 297, + /* 440 */ 716, 276, 299, 156, 715, 68, 833, 813, 26, 276, + /* 450 */ 798, 249, 38, 38, 68, 619, 96, 16, 68, 15, + /* 460 */ 25, 25, 114, 18, 113, 17, 77, 748, 300, 25, + /* 470 */ 209, 733, 6, 734, 731, 20, 732, 19, 120, 22, + /* 480 */ 119, 21, 133, 132, 210, 214, 207, 799, 815, 215, + /* 490 */ 216, 1147, 1139, 220, 221, 222, 219, 204, 703, 1086, + /* 500 */ 1085, 239, 270, 1082, 153, 1081, 240, 341, 1038, 1068, + /* 510 */ 47, 1049, 1046, 1047, 1067, 150, 1031, 277, 1012, 1051, + /* 520 */ 281, 155, 160, 288, 172, 173, 236, 283, 1010, 174, + /* 530 */ 170, 168, 163, 175, 925, 302, 303, 304, 307, 308, + /* 540 */ 45, 202, 759, 1028, 41, 164, 319, 919, 285, 326, + /* 550 */ 76, 1146, 162, 73, 295, 111, 49, 1145, 165, 1142, + /* 560 */ 293, 291, 181, 333, 1138, 117, 1137, 1134, 289, 287, + /* 570 */ 182, 945, 42, 39, 46, 203, 907, 127, 905, 129, + /* 580 */ 130, 284, 903, 902, 262, 193, 900, 282, 899, 898, + /* 590 */ 897, 896, 895, 894, 196, 198, 891, 889, 887, 885, + /* 600 */ 200, 48, 882, 201, 878, 306, 351, 275, 82, 87, + /* 610 */ 286, 1069, 344, 122, 343, 345, 346, 347, 226, 246, + /* 620 */ 348, 349, 301, 359, 858, 263, 264, 857, 223, 266, + /* 630 */ 224, 267, 856, 839, 924, 105, 923, 106, 271, 838, + /* 640 */ 276, 296, 85, 278, 10, 901, 736, 141, 142, 185, + /* 650 */ 893, 184, 946, 183, 186, 187, 189, 143, 188, 892, + /* 660 */ 947, 2, 30, 144, 983, 4, 884, 883, 166, 169, + /* 670 */ 167, 88, 171, 761, 993, 157, 159, 770, 158, 238, + /* 680 */ 764, 89, 31, 766, 90, 290, 11, 12, 32, 13, + /* 690 */ 27, 298, 28, 97, 99, 102, 35, 101, 634, 36, + /* 700 */ 103, 669, 667, 666, 665, 663, 662, 661, 658, 317, + /* 710 */ 624, 107, 7, 323, 814, 816, 8, 324, 110, 112, + /* 720 */ 69, 70, 116, 706, 705, 38, 702, 650, 118, 648, + /* 730 */ 640, 646, 642, 644, 638, 636, 672, 671, 670, 668, + /* 740 */ 664, 660, 659, 192, 622, 587, 862, 861, 861, 861, + /* 750 */ 861, 861, 861, 861, 861, 861, 861, 861, 861, 146, + /* 760 */ 147, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 264, 1, 204, 1, 1, 204, 197, 198, 197, 9, - /* 10 */ 274, 9, 9, 13, 14, 197, 16, 17, 195, 196, - /* 20 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 221, - /* 30 */ 9, 223, 224, 33, 34, 197, 228, 37, 38, 39, - /* 40 */ 232, 197, 234, 235, 219, 220, 5, 45, 46, 47, - /* 50 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 60 */ 58, 59, 264, 61, 
203, 264, 13, 14, 245, 16, - /* 70 */ 17, 210, 274, 20, 21, 274, 23, 24, 25, 26, - /* 80 */ 27, 28, 271, 265, 261, 85, 33, 34, 201, 202, - /* 90 */ 37, 38, 39, 13, 14, 204, 16, 17, 5, 245, - /* 100 */ 20, 21, 81, 23, 24, 25, 26, 27, 28, 271, - /* 110 */ 266, 267, 197, 33, 34, 261, 205, 37, 38, 39, - /* 120 */ 243, 62, 63, 64, 247, 243, 33, 34, 69, 247, - /* 130 */ 71, 72, 73, 13, 14, 82, 16, 17, 79, 81, - /* 140 */ 20, 21, 248, 23, 24, 25, 26, 27, 28, 238, - /* 150 */ 239, 240, 241, 33, 34, 264, 262, 37, 38, 39, - /* 160 */ 14, 246, 16, 17, 83, 274, 20, 21, 141, 23, - /* 170 */ 24, 25, 26, 27, 28, 197, 118, 150, 151, 33, - /* 180 */ 34, 89, 243, 37, 38, 39, 247, 197, 97, 98, - /* 190 */ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, - /* 200 */ 109, 110, 111, 221, 222, 223, 224, 225, 226, 227, - /* 210 */ 228, 229, 230, 231, 232, 233, 234, 235, 16, 17, - /* 220 */ 242, 197, 20, 21, 246, 23, 24, 25, 26, 27, - /* 230 */ 28, 197, 245, 44, 197, 33, 34, 0, 205, 37, - /* 240 */ 38, 39, 68, 1, 2, 208, 247, 5, 261, 7, - /* 250 */ 61, 9, 1, 2, 247, 203, 5, 68, 7, 269, - /* 260 */ 9, 271, 210, 74, 75, 76, 197, 33, 34, 80, - /* 270 */ 81, 37, 38, 39, 241, 33, 34, 208, 244, 37, - /* 280 */ 62, 63, 64, 197, 33, 34, 264, 69, 70, 71, - /* 290 */ 72, 73, 264, 2, 208, 271, 5, 273, 7, 203, - /* 300 */ 9, 25, 26, 27, 28, 87, 210, 118, 68, 33, - /* 310 */ 34, 197, 197, 37, 38, 39, 5, 143, 7, 145, - /* 320 */ 83, 147, 148, 81, 33, 34, 62, 63, 64, 140, - /* 330 */ 197, 142, 81, 69, 70, 71, 72, 73, 149, 37, - /* 340 */ 5, 97, 7, 99, 100, 197, 197, 197, 104, 37, - /* 350 */ 38, 39, 108, 197, 110, 111, 242, 242, 116, 117, - /* 360 */ 246, 246, 82, 205, 197, 123, 77, 116, 117, 197, - /* 370 */ 197, 65, 66, 67, 123, 242, 96, 81, 89, 246, - /* 380 */ 82, 85, 96, 143, 96, 145, 15, 147, 148, 82, - /* 390 */ 242, 242, 242, 96, 246, 246, 246, 239, 242, 62, - /* 400 */ 63, 64, 246, 206, 207, 131, 132, 116, 117, 242, - /* 410 */ 122, 115, 1, 246, 242, 242, 82, 119, 246, 246, - /* 420 */ 237, 82, 82, 121, 138, 82, 119, 82, 82, 82, - /* 430 */ 96, 82, 81, 136, 123, 96, 96, 82, 82, 96, - /* 440 */ 60, 96, 96, 96, 81, 96, 82, 144, 37, 146, - /* 450 */ 264, 96, 96, 144, 144, 146, 146, 5, 123, 7, - /* 460 */ 96, 81, 5, 264, 7, 114, 144, 144, 146, 146, - /* 470 */ 144, 264, 146, 77, 78, 264, 113, 264, 264, 264, - /* 480 */ 264, 264, 247, 264, 264, 264, 264, 116, 264, 264, - /* 490 */ 264, 264, 264, 237, 237, 237, 237, 237, 237, 263, - /* 500 */ 245, 197, 197, 272, 272, 197, 60, 245, 245, 268, - /* 510 */ 197, 197, 197, 197, 197, 197, 197, 249, 197, 199, - /* 520 */ 268, 197, 197, 258, 197, 197, 197, 197, 197, 197, - /* 530 */ 197, 123, 260, 197, 268, 197, 259, 268, 197, 197, - /* 540 */ 135, 197, 129, 137, 134, 197, 256, 197, 133, 197, - /* 550 */ 197, 197, 257, 127, 197, 197, 197, 197, 197, 197, - /* 560 */ 126, 125, 197, 197, 197, 197, 197, 197, 128, 197, - /* 570 */ 197, 197, 197, 197, 197, 197, 197, 124, 197, 197, - /* 580 */ 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, - /* 590 */ 197, 197, 197, 139, 197, 197, 197, 88, 112, 199, - /* 600 */ 199, 199, 199, 95, 94, 51, 91, 93, 55, 92, - /* 610 */ 90, 83, 5, 5, 152, 5, 199, 199, 199, 152, - /* 620 */ 5, 5, 99, 199, 199, 98, 141, 114, 205, 209, - /* 630 */ 209, 119, 205, 81, 96, 96, 120, 199, 82, 200, - /* 640 */ 81, 81, 200, 212, 199, 216, 218, 217, 215, 214, - /* 650 */ 213, 200, 211, 206, 199, 82, 200, 199, 236, 255, - /* 660 */ 199, 254, 253, 220, 252, 251, 250, 201, 236, 82, - /* 670 */ 81, 96, 82, 81, 1, 82, 81, 81, 96, 130, - /* 680 */ 96, 130, 81, 81, 114, 81, 
115, 70, 77, 86, - /* 690 */ 85, 5, 86, 9, 85, 5, 5, 5, 5, 5, - /* 700 */ 5, 5, 84, 15, 77, 81, 24, 82, 59, 81, - /* 710 */ 116, 146, 146, 16, 16, 96, 5, 5, 146, 82, - /* 720 */ 146, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 730 */ 5, 5, 5, 5, 5, 5, 96, 84, 60, 0, - /* 740 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, - /* 750 */ 275, 275, 21, 21, 275, 275, 275, 275, 275, 275, - /* 760 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 0 */ 264, 1, 243, 1, 197, 198, 247, 245, 243, 9, + /* 10 */ 274, 9, 247, 13, 14, 197, 16, 17, 5, 197, + /* 20 */ 20, 21, 22, 261, 24, 25, 26, 27, 28, 29, + /* 30 */ 245, 5, 243, 7, 34, 35, 247, 264, 38, 39, + /* 40 */ 40, 13, 14, 1, 16, 17, 261, 197, 20, 21, + /* 50 */ 22, 9, 24, 25, 26, 27, 28, 29, 208, 197, + /* 60 */ 242, 264, 34, 35, 246, 204, 38, 39, 40, 221, + /* 70 */ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + /* 80 */ 232, 233, 234, 235, 82, 84, 86, 265, 46, 47, + /* 90 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 100 */ 58, 59, 60, 205, 62, 13, 14, 197, 16, 17, + /* 110 */ 204, 83, 20, 21, 22, 90, 24, 25, 26, 27, + /* 120 */ 28, 29, 38, 39, 40, 264, 34, 35, 266, 267, + /* 130 */ 38, 39, 40, 13, 14, 274, 16, 17, 97, 241, + /* 140 */ 20, 21, 22, 1, 24, 25, 26, 27, 28, 29, + /* 150 */ 124, 9, 242, 264, 34, 35, 246, 0, 38, 39, + /* 160 */ 40, 14, 197, 16, 17, 264, 197, 20, 21, 22, + /* 170 */ 264, 24, 25, 26, 27, 28, 29, 208, 137, 204, + /* 180 */ 274, 34, 35, 197, 264, 38, 39, 40, 16, 17, + /* 190 */ 195, 196, 20, 21, 22, 142, 24, 25, 26, 27, + /* 200 */ 28, 29, 5, 197, 151, 152, 34, 35, 219, 220, + /* 210 */ 38, 39, 40, 98, 99, 100, 101, 102, 103, 104, + /* 220 */ 105, 106, 107, 108, 109, 110, 111, 112, 45, 1, + /* 230 */ 2, 34, 35, 5, 205, 7, 271, 9, 273, 264, + /* 240 */ 245, 84, 82, 197, 264, 62, 197, 1, 2, 274, + /* 250 */ 244, 5, 69, 7, 208, 9, 261, 271, 75, 76, + /* 260 */ 77, 264, 34, 35, 81, 82, 38, 238, 239, 240, + /* 270 */ 241, 264, 197, 197, 83, 63, 64, 65, 197, 119, + /* 280 */ 34, 35, 70, 71, 72, 73, 74, 2, 97, 78, + /* 290 */ 5, 242, 7, 197, 9, 246, 5, 197, 7, 197, + /* 300 */ 88, 90, 119, 26, 27, 28, 29, 203, 197, 82, + /* 310 */ 82, 34, 35, 86, 210, 38, 39, 40, 242, 34, + /* 320 */ 35, 246, 246, 242, 141, 264, 143, 246, 82, 203, + /* 330 */ 63, 64, 65, 150, 69, 197, 210, 70, 242, 72, + /* 340 */ 73, 74, 246, 116, 242, 117, 118, 80, 246, 63, + /* 350 */ 64, 65, 124, 69, 206, 207, 70, 71, 72, 73, + /* 360 */ 74, 34, 35, 117, 118, 38, 39, 40, 203, 269, + /* 370 */ 124, 271, 197, 197, 98, 210, 100, 101, 197, 248, + /* 380 */ 242, 105, 271, 38, 246, 109, 15, 111, 112, 66, + /* 390 */ 67, 68, 205, 262, 221, 97, 223, 224, 63, 64, + /* 400 */ 65, 228, 117, 118, 83, 232, 247, 234, 235, 144, + /* 410 */ 97, 146, 83, 148, 149, 124, 264, 242, 242, 201, + /* 420 */ 202, 246, 246, 242, 132, 133, 239, 246, 144, 83, + /* 430 */ 146, 83, 148, 149, 83, 82, 123, 139, 83, 83, + /* 440 */ 83, 120, 83, 97, 83, 97, 83, 83, 97, 120, + /* 450 */ 1, 61, 97, 97, 97, 83, 97, 145, 97, 147, + /* 460 */ 97, 97, 145, 145, 147, 147, 82, 122, 115, 97, + /* 470 */ 264, 5, 82, 7, 5, 145, 7, 147, 145, 145, + /* 480 */ 147, 147, 78, 79, 264, 264, 264, 38, 117, 264, + /* 490 */ 264, 247, 247, 264, 264, 264, 264, 264, 114, 237, + /* 500 */ 237, 237, 197, 237, 197, 237, 237, 237, 197, 272, + /* 510 */ 263, 197, 197, 197, 272, 61, 245, 245, 245, 197, + /* 520 */ 268, 197, 197, 197, 249, 197, 268, 268, 197, 197, + /* 530 */ 251, 253, 258, 197, 197, 197, 197, 197, 197, 197, + /* 540 */ 197, 197, 124, 260, 197, 257, 197, 197, 268, 197, + /* 550 */ 136, 197, 259, 
138, 130, 197, 135, 197, 256, 197, + /* 560 */ 134, 128, 197, 197, 197, 197, 197, 197, 127, 126, + /* 570 */ 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, + /* 580 */ 197, 129, 197, 197, 197, 197, 197, 125, 197, 197, + /* 590 */ 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, + /* 600 */ 197, 140, 197, 197, 197, 89, 113, 199, 199, 199, + /* 610 */ 199, 199, 52, 96, 95, 92, 94, 56, 199, 199, + /* 620 */ 93, 91, 199, 84, 5, 153, 5, 5, 199, 153, + /* 630 */ 199, 5, 5, 100, 209, 205, 209, 205, 142, 99, + /* 640 */ 120, 115, 121, 97, 82, 199, 83, 200, 200, 212, + /* 650 */ 199, 216, 218, 217, 215, 213, 211, 200, 214, 199, + /* 660 */ 220, 206, 82, 200, 236, 201, 199, 199, 255, 252, + /* 670 */ 254, 97, 250, 83, 236, 82, 97, 83, 82, 1, + /* 680 */ 83, 82, 97, 83, 82, 82, 131, 131, 97, 82, + /* 690 */ 82, 115, 82, 116, 78, 71, 87, 86, 5, 87, + /* 700 */ 86, 9, 5, 5, 5, 5, 5, 5, 5, 15, + /* 710 */ 85, 78, 82, 25, 83, 117, 82, 60, 147, 147, + /* 720 */ 16, 16, 147, 5, 5, 97, 83, 5, 147, 5, + /* 730 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 740 */ 5, 5, 5, 97, 85, 61, 0, 275, 275, 275, + /* 750 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 21, + /* 760 */ 21, 275, 275, 275, 275, 275, 275, 275, 275, 275, /* 770 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, /* 780 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, /* 790 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, @@ -391,114 +390,115 @@ static const YYCODETYPE yy_lookahead[] = { /* 910 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, /* 920 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, /* 930 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, - /* 940 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 940 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 950 */ 275, 275, 275, 275, 275, 275, }; -#define YY_SHIFT_COUNT (361) +#define YY_SHIFT_COUNT (363) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (739) +#define YY_SHIFT_MAX (746) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 189, 91, 91, 244, 244, 81, 242, 251, 251, 21, - /* 10 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 20 */ 3, 3, 3, 0, 2, 251, 291, 291, 291, 58, - /* 30 */ 58, 3, 3, 3, 237, 3, 3, 3, 3, 289, - /* 40 */ 81, 92, 92, 41, 754, 754, 754, 251, 251, 251, - /* 50 */ 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, - /* 60 */ 251, 251, 251, 251, 251, 251, 251, 291, 291, 291, - /* 70 */ 93, 93, 93, 93, 93, 93, 93, 3, 3, 3, - /* 80 */ 302, 3, 3, 3, 58, 58, 3, 3, 3, 3, - /* 90 */ 274, 274, 288, 58, 3, 3, 3, 3, 3, 3, - /* 100 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 110 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 120 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 130 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 140 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - /* 150 */ 3, 3, 446, 446, 446, 408, 408, 408, 408, 446, - /* 160 */ 446, 405, 406, 413, 410, 415, 426, 434, 436, 440, - /* 170 */ 453, 454, 446, 446, 446, 509, 509, 486, 81, 81, - /* 180 */ 446, 446, 508, 510, 554, 515, 514, 553, 517, 520, - /* 190 */ 486, 41, 446, 528, 528, 446, 528, 446, 528, 446, - /* 200 */ 446, 754, 754, 53, 80, 80, 120, 80, 146, 202, - /* 210 */ 218, 276, 276, 276, 276, 59, 264, 234, 234, 234, - /* 220 */ 234, 174, 240, 27, 296, 312, 312, 311, 335, 306, - /* 230 */ 337, 280, 298, 307, 334, 339, 340, 286, 297, 343, - /* 240 */ 345, 346, 347, 349, 351, 355, 356, 411, 380, 371, - /* 250 */ 364, 303, 309, 310, 452, 457, 322, 323, 363, 326, - /* 260 */ 396, 607, 462, 608, 610, 467, 615, 616, 523, 527, - /* 270 */ 485, 512, 513, 552, 516, 556, 559, 538, 539, 573, - /* 280 */ 560, 587, 589, 590, 575, 
592, 593, 595, 673, 596, - /* 290 */ 582, 549, 584, 551, 601, 513, 602, 570, 604, 571, - /* 300 */ 611, 603, 605, 617, 686, 606, 609, 684, 690, 691, - /* 310 */ 692, 693, 694, 695, 696, 618, 688, 627, 624, 625, - /* 320 */ 594, 628, 682, 649, 697, 565, 566, 619, 619, 619, - /* 330 */ 619, 698, 572, 574, 619, 619, 619, 711, 712, 637, - /* 340 */ 619, 716, 717, 718, 719, 720, 721, 722, 723, 724, - /* 350 */ 725, 726, 727, 728, 729, 730, 640, 653, 731, 732, - /* 360 */ 678, 739, + /* 0 */ 183, 115, 115, 276, 276, 1, 228, 246, 246, 2, + /* 10 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 20 */ 142, 142, 142, 0, 42, 246, 285, 285, 285, 160, + /* 30 */ 160, 142, 142, 142, 157, 142, 142, 142, 142, 211, + /* 40 */ 1, 25, 25, 13, 761, 761, 761, 246, 246, 246, + /* 50 */ 246, 246, 246, 246, 246, 246, 246, 246, 246, 246, + /* 60 */ 246, 246, 246, 246, 246, 246, 246, 246, 285, 285, + /* 70 */ 285, 197, 197, 197, 197, 197, 197, 197, 142, 142, + /* 80 */ 142, 345, 142, 142, 142, 160, 160, 142, 142, 142, + /* 90 */ 142, 292, 292, 313, 160, 142, 142, 142, 142, 142, + /* 100 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 110 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 120 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 130 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 140 */ 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, + /* 150 */ 142, 142, 142, 454, 454, 454, 418, 418, 418, 418, + /* 160 */ 454, 454, 414, 415, 424, 421, 426, 433, 441, 443, + /* 170 */ 452, 462, 461, 454, 454, 454, 516, 516, 493, 1, + /* 180 */ 1, 454, 454, 517, 519, 560, 523, 522, 561, 527, + /* 190 */ 530, 493, 13, 454, 539, 539, 454, 539, 454, 539, + /* 200 */ 454, 454, 761, 761, 28, 92, 92, 120, 92, 147, + /* 210 */ 172, 212, 277, 277, 277, 277, 277, 267, 286, 327, + /* 220 */ 327, 327, 327, 265, 284, 53, 227, 84, 84, 26, + /* 230 */ 291, 323, 335, 191, 321, 329, 346, 348, 351, 298, + /* 240 */ 41, 355, 356, 357, 359, 361, 353, 363, 364, 449, + /* 250 */ 390, 371, 372, 312, 317, 318, 466, 469, 330, 333, + /* 260 */ 384, 334, 404, 619, 472, 621, 622, 476, 626, 627, + /* 270 */ 533, 540, 496, 520, 526, 562, 521, 563, 580, 546, + /* 280 */ 574, 590, 593, 594, 596, 597, 579, 599, 600, 602, + /* 290 */ 678, 603, 585, 555, 591, 556, 607, 526, 608, 576, + /* 300 */ 610, 577, 616, 609, 611, 624, 693, 612, 614, 692, + /* 310 */ 697, 698, 699, 700, 701, 702, 703, 625, 694, 633, + /* 320 */ 630, 631, 598, 634, 688, 657, 704, 571, 572, 628, + /* 330 */ 628, 628, 628, 705, 575, 581, 628, 628, 628, 718, + /* 340 */ 719, 643, 628, 722, 724, 725, 726, 727, 728, 729, + /* 350 */ 730, 731, 732, 733, 734, 735, 736, 737, 646, 659, + /* 360 */ 738, 739, 684, 746, }; -#define YY_REDUCE_COUNT (202) +#define YY_REDUCE_COUNT (203) #define YY_REDUCE_MIN (-264) -#define YY_REDUCE_MAX (466) +#define YY_REDUCE_MAX (468) static const short yy_reduce_ofst[] = { - /* 0 */ -177, -18, -18, -192, -192, -89, -202, -199, -109, -156, - /* 10 */ -22, 24, -10, 114, 115, 133, 148, 149, 150, 156, - /* 20 */ 167, 172, 173, -182, -191, -264, -123, -118, -61, -146, - /* 30 */ -13, -189, -162, 34, 33, 37, 69, 86, -85, -139, - /* 40 */ 158, 52, 96, -175, -106, 197, -113, 22, 28, 186, - /* 50 */ 199, 207, 211, 213, 214, 215, 216, 217, 219, 220, - /* 60 */ 221, 222, 224, 225, 226, 227, 228, -1, 7, 235, - /* 70 */ 183, 256, 257, 258, 259, 260, 261, 304, 305, 308, - /* 80 */ 236, 313, 314, 315, 255, 262, 316, 317, 318, 319, - /* 90 */ 231, 232, 268, 263, 321, 324, 325, 327, 328, 329, - /* 100 */ 330, 331, 332, 333, 336, 
338, 341, 342, 344, 348, - /* 110 */ 350, 352, 353, 354, 357, 358, 359, 360, 361, 362, - /* 120 */ 365, 366, 367, 368, 369, 370, 372, 373, 374, 375, - /* 130 */ 376, 377, 378, 379, 381, 382, 383, 384, 385, 386, - /* 140 */ 387, 388, 389, 390, 391, 392, 393, 394, 395, 397, - /* 150 */ 398, 399, 320, 400, 401, 241, 252, 266, 269, 402, - /* 160 */ 403, 272, 277, 265, 295, 290, 404, 407, 409, 412, - /* 170 */ 414, 416, 417, 418, 419, 420, 421, 422, 423, 427, - /* 180 */ 424, 425, 428, 430, 429, 431, 433, 437, 435, 441, - /* 190 */ 432, 443, 438, 439, 442, 445, 451, 455, 456, 458, - /* 200 */ 461, 447, 466, + /* 0 */ -5, -152, -152, 173, 173, 29, -139, -94, -25, -138, + /* 10 */ -182, -35, 100, -90, 49, 76, 81, 96, 102, 138, + /* 20 */ 175, 176, 181, -178, -193, -264, -241, -235, -211, -238, + /* 30 */ -215, -14, 111, 6, -102, -150, -31, 46, 75, 104, + /* 40 */ 187, 126, 165, -11, 131, 148, 218, -227, -203, -111, + /* 50 */ -99, -80, -20, -3, 7, 61, 152, 206, 220, 221, + /* 60 */ 222, 225, 226, 229, 230, 231, 232, 233, 159, 244, + /* 70 */ 245, 262, 263, 264, 266, 268, 269, 270, 305, 307, + /* 80 */ 311, 247, 314, 315, 316, 271, 272, 322, 324, 325, + /* 90 */ 326, 237, 242, 275, 273, 328, 331, 332, 336, 337, + /* 100 */ 338, 339, 340, 341, 342, 343, 344, 347, 349, 350, + /* 110 */ 352, 354, 358, 360, 362, 365, 366, 367, 368, 369, + /* 120 */ 370, 373, 374, 375, 376, 377, 378, 379, 380, 381, + /* 130 */ 382, 383, 385, 386, 387, 388, 389, 391, 392, 393, + /* 140 */ 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, + /* 150 */ 405, 406, 407, 408, 409, 410, 252, 258, 259, 280, + /* 160 */ 411, 412, 283, 293, 274, 288, 302, 413, 416, 278, + /* 170 */ 417, 279, 422, 419, 420, 423, 425, 427, 428, 430, + /* 180 */ 432, 429, 431, 434, 436, 435, 437, 439, 442, 444, + /* 190 */ 445, 438, 440, 446, 447, 448, 451, 457, 460, 463, + /* 200 */ 467, 468, 455, 464, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 856, 979, 918, 989, 905, 915, 1126, 1126, 1126, 856, - /* 10 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 20 */ 856, 856, 856, 1037, 876, 1126, 856, 856, 856, 856, - /* 30 */ 856, 856, 856, 856, 915, 856, 856, 856, 856, 925, - /* 40 */ 915, 925, 925, 856, 1032, 963, 981, 856, 856, 856, - /* 50 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 60 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 70 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 80 */ 1039, 1045, 1042, 856, 856, 856, 1047, 856, 856, 856, - /* 90 */ 1069, 1069, 1030, 856, 856, 856, 856, 856, 856, 856, - /* 100 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 110 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 120 */ 856, 856, 856, 856, 856, 856, 903, 856, 901, 856, - /* 130 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 140 */ 856, 856, 856, 856, 887, 856, 856, 856, 856, 856, - /* 150 */ 856, 874, 878, 878, 878, 856, 856, 856, 856, 878, - /* 160 */ 878, 1076, 1080, 1062, 1074, 1070, 1057, 1055, 1053, 1061, - /* 170 */ 1052, 1084, 878, 878, 878, 923, 923, 919, 915, 915, - /* 180 */ 878, 878, 941, 939, 937, 929, 935, 931, 933, 927, - /* 190 */ 906, 856, 878, 913, 913, 878, 913, 878, 913, 878, - /* 200 */ 878, 963, 981, 856, 1085, 1075, 856, 1125, 1115, 1114, - /* 210 */ 856, 1121, 1113, 1112, 1111, 856, 856, 1107, 1110, 1109, - /* 220 */ 1108, 856, 856, 856, 856, 1117, 1116, 856, 856, 856, - /* 230 */ 856, 856, 856, 856, 856, 856, 856, 1081, 1077, 856, - /* 240 */ 856, 856, 856, 856, 856, 856, 856, 856, 1087, 856, - /* 250 */ 856, 856, 856, 856, 856, 856, 856, 856, 
991, 856, - /* 260 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 270 */ 856, 1029, 856, 856, 856, 856, 856, 1041, 1040, 856, - /* 280 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 290 */ 1071, 856, 1063, 856, 856, 1003, 856, 856, 856, 856, - /* 300 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 310 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 320 */ 856, 856, 856, 856, 856, 856, 856, 1144, 1139, 1140, - /* 330 */ 1137, 856, 856, 856, 1136, 1131, 1132, 856, 856, 856, - /* 340 */ 1129, 856, 856, 856, 856, 856, 856, 856, 856, 856, - /* 350 */ 856, 856, 856, 856, 856, 856, 947, 856, 885, 883, - /* 360 */ 856, 856, + /* 0 */ 859, 982, 921, 992, 908, 918, 1130, 1130, 1130, 859, + /* 10 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 20 */ 859, 859, 859, 1040, 879, 1130, 859, 859, 859, 859, + /* 30 */ 859, 859, 859, 859, 918, 859, 859, 859, 859, 928, + /* 40 */ 918, 928, 928, 859, 1035, 966, 984, 859, 859, 859, + /* 50 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 60 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 70 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 80 */ 859, 1042, 1048, 1045, 859, 859, 859, 1050, 859, 859, + /* 90 */ 859, 1072, 1072, 1033, 859, 859, 859, 859, 859, 859, + /* 100 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 110 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 120 */ 859, 859, 859, 859, 859, 859, 859, 906, 859, 904, + /* 130 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 140 */ 859, 859, 859, 859, 859, 890, 859, 859, 859, 859, + /* 150 */ 859, 859, 877, 881, 881, 881, 859, 859, 859, 859, + /* 160 */ 881, 881, 1079, 1083, 1065, 1077, 1073, 1060, 1058, 1056, + /* 170 */ 1064, 1055, 1087, 881, 881, 881, 926, 926, 922, 918, + /* 180 */ 918, 881, 881, 944, 942, 940, 932, 938, 934, 936, + /* 190 */ 930, 909, 859, 881, 916, 916, 881, 916, 881, 916, + /* 200 */ 881, 881, 966, 984, 859, 1088, 1078, 859, 1129, 1118, + /* 210 */ 1117, 859, 1125, 1124, 1116, 1115, 1114, 859, 859, 1110, + /* 220 */ 1113, 1112, 1111, 859, 859, 859, 859, 1120, 1119, 859, + /* 230 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 1084, + /* 240 */ 1080, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 250 */ 1090, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 260 */ 994, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 270 */ 859, 859, 859, 1032, 859, 859, 859, 859, 859, 1044, + /* 280 */ 1043, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 290 */ 859, 859, 1074, 859, 1066, 859, 859, 1006, 859, 859, + /* 300 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 310 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 859, + /* 320 */ 859, 859, 859, 859, 859, 859, 859, 859, 859, 1148, + /* 330 */ 1143, 1144, 1141, 859, 859, 859, 1140, 1135, 1136, 859, + /* 340 */ 859, 859, 1133, 859, 859, 859, 859, 859, 859, 859, + /* 350 */ 859, 859, 859, 859, 859, 859, 859, 859, 950, 859, + /* 360 */ 888, 886, 859, 859, }; /********** End of lemon-generated parsing tables *****************************/ @@ -540,6 +540,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* NOTNULL => nothing */ 0, /* IS => nothing */ 1, /* LIKE => ID */ + 1, /* MATCH => ID */ 1, /* GLOB => ID */ 0, /* BETWEEN => nothing */ 0, /* IN => nothing */ @@ -692,7 +693,6 @@ static const YYCODETYPE yyFallback[] = { 1, /* IMMEDIATE => ID */ 1, /* INITIALLY => ID */ 1, /* INSTEAD => ID */ - 1, /* MATCH => ID */ 1, /* KEY => ID */ 1, /* OF => ID */ 1, /* RAISE => ID */ @@ -822,159 +822,159 @@ static const char *const 
yyTokenName[] = { /* 19 */ "NOTNULL", /* 20 */ "IS", /* 21 */ "LIKE", - /* 22 */ "GLOB", - /* 23 */ "BETWEEN", - /* 24 */ "IN", - /* 25 */ "GT", - /* 26 */ "GE", - /* 27 */ "LT", - /* 28 */ "LE", - /* 29 */ "BITAND", - /* 30 */ "BITOR", - /* 31 */ "LSHIFT", - /* 32 */ "RSHIFT", - /* 33 */ "PLUS", - /* 34 */ "MINUS", - /* 35 */ "DIVIDE", - /* 36 */ "TIMES", - /* 37 */ "STAR", - /* 38 */ "SLASH", - /* 39 */ "REM", - /* 40 */ "CONCAT", - /* 41 */ "UMINUS", - /* 42 */ "UPLUS", - /* 43 */ "BITNOT", - /* 44 */ "SHOW", - /* 45 */ "DATABASES", - /* 46 */ "TOPICS", - /* 47 */ "FUNCTIONS", - /* 48 */ "MNODES", - /* 49 */ "DNODES", - /* 50 */ "ACCOUNTS", - /* 51 */ "USERS", - /* 52 */ "MODULES", - /* 53 */ "QUERIES", - /* 54 */ "CONNECTIONS", - /* 55 */ "STREAMS", - /* 56 */ "VARIABLES", - /* 57 */ "SCORES", - /* 58 */ "GRANTS", - /* 59 */ "VNODES", - /* 60 */ "DOT", - /* 61 */ "CREATE", - /* 62 */ "TABLE", - /* 63 */ "STABLE", - /* 64 */ "DATABASE", - /* 65 */ "TABLES", - /* 66 */ "STABLES", - /* 67 */ "VGROUPS", - /* 68 */ "DROP", - /* 69 */ "TOPIC", - /* 70 */ "FUNCTION", - /* 71 */ "DNODE", - /* 72 */ "USER", - /* 73 */ "ACCOUNT", - /* 74 */ "USE", - /* 75 */ "DESCRIBE", - /* 76 */ "ALTER", - /* 77 */ "PASS", - /* 78 */ "PRIVILEGE", - /* 79 */ "LOCAL", - /* 80 */ "COMPACT", - /* 81 */ "LP", - /* 82 */ "RP", - /* 83 */ "IF", - /* 84 */ "EXISTS", - /* 85 */ "AS", - /* 86 */ "OUTPUTTYPE", - /* 87 */ "AGGREGATE", - /* 88 */ "BUFSIZE", - /* 89 */ "PPS", - /* 90 */ "TSERIES", - /* 91 */ "DBS", - /* 92 */ "STORAGE", - /* 93 */ "QTIME", - /* 94 */ "CONNS", - /* 95 */ "STATE", - /* 96 */ "COMMA", - /* 97 */ "KEEP", - /* 98 */ "CACHE", - /* 99 */ "REPLICA", - /* 100 */ "QUORUM", - /* 101 */ "DAYS", - /* 102 */ "MINROWS", - /* 103 */ "MAXROWS", - /* 104 */ "BLOCKS", - /* 105 */ "CTIME", - /* 106 */ "WAL", - /* 107 */ "FSYNC", - /* 108 */ "COMP", - /* 109 */ "PRECISION", - /* 110 */ "UPDATE", - /* 111 */ "CACHELAST", - /* 112 */ "PARTITIONS", - /* 113 */ "UNSIGNED", - /* 114 */ "TAGS", - /* 115 */ "USING", - /* 116 */ "NULL", - /* 117 */ "NOW", - /* 118 */ "SELECT", - /* 119 */ "UNION", - /* 120 */ "ALL", - /* 121 */ "DISTINCT", - /* 122 */ "FROM", - /* 123 */ "VARIABLE", - /* 124 */ "INTERVAL", - /* 125 */ "SESSION", - /* 126 */ "STATE_WINDOW", - /* 127 */ "FILL", - /* 128 */ "SLIDING", - /* 129 */ "ORDER", - /* 130 */ "BY", - /* 131 */ "ASC", - /* 132 */ "DESC", - /* 133 */ "GROUP", - /* 134 */ "HAVING", - /* 135 */ "LIMIT", - /* 136 */ "OFFSET", - /* 137 */ "SLIMIT", - /* 138 */ "SOFFSET", - /* 139 */ "WHERE", - /* 140 */ "RESET", - /* 141 */ "QUERY", - /* 142 */ "SYNCDB", - /* 143 */ "ADD", - /* 144 */ "COLUMN", - /* 145 */ "MODIFY", - /* 146 */ "TAG", - /* 147 */ "CHANGE", - /* 148 */ "SET", - /* 149 */ "KILL", - /* 150 */ "CONNECTION", - /* 151 */ "STREAM", - /* 152 */ "COLON", - /* 153 */ "ABORT", - /* 154 */ "AFTER", - /* 155 */ "ATTACH", - /* 156 */ "BEFORE", - /* 157 */ "BEGIN", - /* 158 */ "CASCADE", - /* 159 */ "CLUSTER", - /* 160 */ "CONFLICT", - /* 161 */ "COPY", - /* 162 */ "DEFERRED", - /* 163 */ "DELIMITERS", - /* 164 */ "DETACH", - /* 165 */ "EACH", - /* 166 */ "END", - /* 167 */ "EXPLAIN", - /* 168 */ "FAIL", - /* 169 */ "FOR", - /* 170 */ "IGNORE", - /* 171 */ "IMMEDIATE", - /* 172 */ "INITIALLY", - /* 173 */ "INSTEAD", - /* 174 */ "MATCH", + /* 22 */ "MATCH", + /* 23 */ "GLOB", + /* 24 */ "BETWEEN", + /* 25 */ "IN", + /* 26 */ "GT", + /* 27 */ "GE", + /* 28 */ "LT", + /* 29 */ "LE", + /* 30 */ "BITAND", + /* 31 */ "BITOR", + /* 32 */ "LSHIFT", + /* 33 */ "RSHIFT", + /* 34 */ "PLUS", + /* 
35 */ "MINUS", + /* 36 */ "DIVIDE", + /* 37 */ "TIMES", + /* 38 */ "STAR", + /* 39 */ "SLASH", + /* 40 */ "REM", + /* 41 */ "CONCAT", + /* 42 */ "UMINUS", + /* 43 */ "UPLUS", + /* 44 */ "BITNOT", + /* 45 */ "SHOW", + /* 46 */ "DATABASES", + /* 47 */ "TOPICS", + /* 48 */ "FUNCTIONS", + /* 49 */ "MNODES", + /* 50 */ "DNODES", + /* 51 */ "ACCOUNTS", + /* 52 */ "USERS", + /* 53 */ "MODULES", + /* 54 */ "QUERIES", + /* 55 */ "CONNECTIONS", + /* 56 */ "STREAMS", + /* 57 */ "VARIABLES", + /* 58 */ "SCORES", + /* 59 */ "GRANTS", + /* 60 */ "VNODES", + /* 61 */ "DOT", + /* 62 */ "CREATE", + /* 63 */ "TABLE", + /* 64 */ "STABLE", + /* 65 */ "DATABASE", + /* 66 */ "TABLES", + /* 67 */ "STABLES", + /* 68 */ "VGROUPS", + /* 69 */ "DROP", + /* 70 */ "TOPIC", + /* 71 */ "FUNCTION", + /* 72 */ "DNODE", + /* 73 */ "USER", + /* 74 */ "ACCOUNT", + /* 75 */ "USE", + /* 76 */ "DESCRIBE", + /* 77 */ "ALTER", + /* 78 */ "PASS", + /* 79 */ "PRIVILEGE", + /* 80 */ "LOCAL", + /* 81 */ "COMPACT", + /* 82 */ "LP", + /* 83 */ "RP", + /* 84 */ "IF", + /* 85 */ "EXISTS", + /* 86 */ "AS", + /* 87 */ "OUTPUTTYPE", + /* 88 */ "AGGREGATE", + /* 89 */ "BUFSIZE", + /* 90 */ "PPS", + /* 91 */ "TSERIES", + /* 92 */ "DBS", + /* 93 */ "STORAGE", + /* 94 */ "QTIME", + /* 95 */ "CONNS", + /* 96 */ "STATE", + /* 97 */ "COMMA", + /* 98 */ "KEEP", + /* 99 */ "CACHE", + /* 100 */ "REPLICA", + /* 101 */ "QUORUM", + /* 102 */ "DAYS", + /* 103 */ "MINROWS", + /* 104 */ "MAXROWS", + /* 105 */ "BLOCKS", + /* 106 */ "CTIME", + /* 107 */ "WAL", + /* 108 */ "FSYNC", + /* 109 */ "COMP", + /* 110 */ "PRECISION", + /* 111 */ "UPDATE", + /* 112 */ "CACHELAST", + /* 113 */ "PARTITIONS", + /* 114 */ "UNSIGNED", + /* 115 */ "TAGS", + /* 116 */ "USING", + /* 117 */ "NULL", + /* 118 */ "NOW", + /* 119 */ "SELECT", + /* 120 */ "UNION", + /* 121 */ "ALL", + /* 122 */ "DISTINCT", + /* 123 */ "FROM", + /* 124 */ "VARIABLE", + /* 125 */ "INTERVAL", + /* 126 */ "SESSION", + /* 127 */ "STATE_WINDOW", + /* 128 */ "FILL", + /* 129 */ "SLIDING", + /* 130 */ "ORDER", + /* 131 */ "BY", + /* 132 */ "ASC", + /* 133 */ "DESC", + /* 134 */ "GROUP", + /* 135 */ "HAVING", + /* 136 */ "LIMIT", + /* 137 */ "OFFSET", + /* 138 */ "SLIMIT", + /* 139 */ "SOFFSET", + /* 140 */ "WHERE", + /* 141 */ "RESET", + /* 142 */ "QUERY", + /* 143 */ "SYNCDB", + /* 144 */ "ADD", + /* 145 */ "COLUMN", + /* 146 */ "MODIFY", + /* 147 */ "TAG", + /* 148 */ "CHANGE", + /* 149 */ "SET", + /* 150 */ "KILL", + /* 151 */ "CONNECTION", + /* 152 */ "STREAM", + /* 153 */ "COLON", + /* 154 */ "ABORT", + /* 155 */ "AFTER", + /* 156 */ "ATTACH", + /* 157 */ "BEFORE", + /* 158 */ "BEGIN", + /* 159 */ "CASCADE", + /* 160 */ "CLUSTER", + /* 161 */ "CONFLICT", + /* 162 */ "COPY", + /* 163 */ "DEFERRED", + /* 164 */ "DELIMITERS", + /* 165 */ "DETACH", + /* 166 */ "EACH", + /* 167 */ "END", + /* 168 */ "EXPLAIN", + /* 169 */ "FAIL", + /* 170 */ "FOR", + /* 171 */ "IGNORE", + /* 172 */ "IMMEDIATE", + /* 173 */ "INITIALLY", + /* 174 */ "INSTEAD", /* 175 */ "KEY", /* 176 */ "OF", /* 177 */ "RAISE", @@ -1345,32 +1345,33 @@ static const char *const yyRuleName[] = { /* 260 */ "expr ::= expr SLASH expr", /* 261 */ "expr ::= expr REM expr", /* 262 */ "expr ::= expr LIKE expr", - /* 263 */ "expr ::= expr IN LP exprlist RP", - /* 264 */ "exprlist ::= exprlist COMMA expritem", - /* 265 */ "exprlist ::= expritem", - /* 266 */ "expritem ::= expr", - /* 267 */ "expritem ::=", - /* 268 */ "cmd ::= RESET QUERY CACHE", - /* 269 */ "cmd ::= SYNCDB ids REPLICA", - /* 270 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN 
columnlist", - /* 271 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 272 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 273 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 274 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 275 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 276 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 277 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 278 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 279 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 280 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 281 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 282 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 283 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 284 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 285 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 286 */ "cmd ::= KILL CONNECTION INTEGER", - /* 287 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 288 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 263 */ "expr ::= expr MATCH expr", + /* 264 */ "expr ::= expr IN LP exprlist RP", + /* 265 */ "exprlist ::= exprlist COMMA expritem", + /* 266 */ "exprlist ::= expritem", + /* 267 */ "expritem ::= expr", + /* 268 */ "expritem ::=", + /* 269 */ "cmd ::= RESET QUERY CACHE", + /* 270 */ "cmd ::= SYNCDB ids REPLICA", + /* 271 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 272 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 273 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 274 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 275 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 276 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 277 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 278 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 279 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 280 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 281 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 282 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 283 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 284 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 285 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 286 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 287 */ "cmd ::= KILL CONNECTION INTEGER", + /* 288 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 289 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1500,9 +1501,7 @@ static void yy_destructor( case 248: /* selcollist */ case 262: /* sclp */ { -#line 750 "sql.y" tSqlExprListDestroy((yypminor->yy131)); -#line 1506 "sql.c" } break; case 219: /* intitemlist */ @@ -1516,32 +1515,24 @@ tSqlExprListDestroy((yypminor->yy131)); case 269: /* sortlist */ case 273: /* grouplist */ { -#line 253 "sql.y" taosArrayDestroy((yypminor->yy131)); -#line 1522 "sql.c" } break; case 240: /* create_table_list */ { -#line 361 "sql.y" destroyCreateTableSql((yypminor->yy272)); -#line 1529 "sql.c" } break; case 245: /* select */ { -#line 481 "sql.y" destroySqlNode((yypminor->yy256)); -#line 1536 "sql.c" } break; case 249: /* from */ case 266: /* tablelist */ case 267: /* sub */ { -#line 536 "sql.y" destroyRelationInfo((yypminor->yy544)); -#line 1545 
"sql.c" } break; case 250: /* where_opt */ @@ -1549,23 +1540,17 @@ destroyRelationInfo((yypminor->yy544)); case 264: /* expr */ case 274: /* expritem */ { -#line 683 "sql.y" tSqlExprDestroy((yypminor->yy46)); -#line 1555 "sql.c" } break; case 261: /* union */ { -#line 489 "sql.y" destroyAllSqlNode((yypminor->yy131)); -#line 1562 "sql.c" } break; case 270: /* sortitem */ { -#line 616 "sql.y" tVariantDestroy(&(yypminor->yy516)); -#line 1569 "sql.c" } break; /********* End destructor definitions *****************************************/ @@ -2117,32 +2102,33 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 264, /* (260) expr ::= expr SLASH expr */ 264, /* (261) expr ::= expr REM expr */ 264, /* (262) expr ::= expr LIKE expr */ - 264, /* (263) expr ::= expr IN LP exprlist RP */ - 204, /* (264) exprlist ::= exprlist COMMA expritem */ - 204, /* (265) exprlist ::= expritem */ - 274, /* (266) expritem ::= expr */ - 274, /* (267) expritem ::= */ - 196, /* (268) cmd ::= RESET QUERY CACHE */ - 196, /* (269) cmd ::= SYNCDB ids REPLICA */ - 196, /* (270) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - 196, /* (271) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - 196, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - 196, /* (273) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - 196, /* (274) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - 196, /* (275) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - 196, /* (276) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 196, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - 196, /* (278) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - 196, /* (279) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 196, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - 196, /* (281) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 196, /* (282) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 196, /* (283) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 196, /* (284) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - 196, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - 196, /* (286) cmd ::= KILL CONNECTION INTEGER */ - 196, /* (287) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 196, /* (288) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 264, /* (263) expr ::= expr MATCH expr */ + 264, /* (264) expr ::= expr IN LP exprlist RP */ + 204, /* (265) exprlist ::= exprlist COMMA expritem */ + 204, /* (266) exprlist ::= expritem */ + 274, /* (267) expritem ::= expr */ + 274, /* (268) expritem ::= */ + 196, /* (269) cmd ::= RESET QUERY CACHE */ + 196, /* (270) cmd ::= SYNCDB ids REPLICA */ + 196, /* (271) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 196, /* (272) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 196, /* (273) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + 196, /* (274) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 196, /* (275) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 196, /* (276) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 196, /* (277) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 196, /* (278) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 196, /* (279) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 196, /* (280) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 196, /* (281) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 196, /* (282) cmd ::= ALTER STABLE ids cpxName ADD TAG 
columnlist */ + 196, /* (283) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 196, /* (284) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 196, /* (285) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 196, /* (286) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 196, /* (287) cmd ::= KILL CONNECTION INTEGER */ + 196, /* (288) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 196, /* (289) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2411,32 +2397,33 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (260) expr ::= expr SLASH expr */ -3, /* (261) expr ::= expr REM expr */ -3, /* (262) expr ::= expr LIKE expr */ - -5, /* (263) expr ::= expr IN LP exprlist RP */ - -3, /* (264) exprlist ::= exprlist COMMA expritem */ - -1, /* (265) exprlist ::= expritem */ - -1, /* (266) expritem ::= expr */ - 0, /* (267) expritem ::= */ - -3, /* (268) cmd ::= RESET QUERY CACHE */ - -3, /* (269) cmd ::= SYNCDB ids REPLICA */ - -7, /* (270) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (271) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - -7, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (273) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - -7, /* (274) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - -8, /* (275) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (276) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - -7, /* (278) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (279) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - -7, /* (281) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (282) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (283) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -9, /* (284) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - -3, /* (286) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (287) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (288) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + -3, /* (263) expr ::= expr MATCH expr */ + -5, /* (264) expr ::= expr IN LP exprlist RP */ + -3, /* (265) exprlist ::= exprlist COMMA expritem */ + -1, /* (266) exprlist ::= expritem */ + -1, /* (267) expritem ::= expr */ + 0, /* (268) expritem ::= */ + -3, /* (269) cmd ::= RESET QUERY CACHE */ + -3, /* (270) cmd ::= SYNCDB ids REPLICA */ + -7, /* (271) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (272) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -7, /* (273) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (274) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (275) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (276) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (277) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (278) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (279) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (280) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (281) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (282) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (283) cmd ::= ALTER STABLE 
ids cpxName DROP TAG ids */ + -8, /* (284) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (285) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (286) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (287) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (288) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (289) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2530,346 +2517,226 @@ static YYACTIONTYPE yy_reduce( case 138: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==138); case 139: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==139); case 140: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==140); -#line 63 "sql.y" {} -#line 2536 "sql.c" break; case 1: /* cmd ::= SHOW DATABASES */ -#line 66 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} -#line 2541 "sql.c" break; case 2: /* cmd ::= SHOW TOPICS */ -#line 67 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);} -#line 2546 "sql.c" break; case 3: /* cmd ::= SHOW FUNCTIONS */ -#line 68 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 0, 0);} -#line 2551 "sql.c" break; case 4: /* cmd ::= SHOW MNODES */ -#line 69 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} -#line 2556 "sql.c" break; case 5: /* cmd ::= SHOW DNODES */ -#line 70 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} -#line 2561 "sql.c" break; case 6: /* cmd ::= SHOW ACCOUNTS */ -#line 71 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} -#line 2566 "sql.c" break; case 7: /* cmd ::= SHOW USERS */ -#line 72 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} -#line 2571 "sql.c" break; case 8: /* cmd ::= SHOW MODULES */ -#line 74 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } -#line 2576 "sql.c" break; case 9: /* cmd ::= SHOW QUERIES */ -#line 75 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } -#line 2581 "sql.c" break; case 10: /* cmd ::= SHOW CONNECTIONS */ -#line 76 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} -#line 2586 "sql.c" break; case 11: /* cmd ::= SHOW STREAMS */ -#line 77 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } -#line 2591 "sql.c" break; case 12: /* cmd ::= SHOW VARIABLES */ -#line 78 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VARIABLES, 0, 0); } -#line 2596 "sql.c" break; case 13: /* cmd ::= SHOW SCORES */ -#line 79 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } -#line 2601 "sql.c" break; case 14: /* cmd ::= SHOW GRANTS */ -#line 80 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } -#line 2606 "sql.c" break; case 15: /* cmd ::= SHOW VNODES */ -#line 82 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } -#line 2611 "sql.c" break; case 16: /* cmd ::= SHOW VNODES ids */ -#line 83 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } -#line 2616 "sql.c" break; case 17: /* dbPrefix ::= */ -#line 87 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} -#line 2621 "sql.c" break; case 18: /* dbPrefix ::= ids DOT */ -#line 88 "sql.y" {yylhsminor.yy0 = yymsp[-1].minor.yy0; } -#line 2626 "sql.c" yymsp[-1].minor.yy0 = yylhsminor.yy0; break; case 19: /* cpxName ::= */ -#line 91 "sql.y" {yymsp[1].minor.yy0.n = 0; } -#line 2632 "sql.c" break; case 20: /* cpxName ::= DOT ids */ -#line 92 "sql.y" {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } 
-#line 2637 "sql.c" break; case 21: /* cmd ::= SHOW CREATE TABLE ids cpxName */ -#line 94 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0); } -#line 2645 "sql.c" break; case 22: /* cmd ::= SHOW CREATE STABLE ids cpxName */ -#line 98 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &yymsp[-1].minor.yy0); } -#line 2653 "sql.c" break; case 23: /* cmd ::= SHOW CREATE DATABASE ids */ -#line 103 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0); } -#line 2660 "sql.c" break; case 24: /* cmd ::= SHOW dbPrefix TABLES */ -#line 107 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } -#line 2667 "sql.c" break; case 25: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ -#line 111 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } -#line 2674 "sql.c" break; case 26: /* cmd ::= SHOW dbPrefix STABLES */ -#line 115 "sql.y" { setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } -#line 2681 "sql.c" break; case 27: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ -#line 119 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } -#line 2690 "sql.c" break; case 28: /* cmd ::= SHOW dbPrefix VGROUPS */ -#line 125 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } -#line 2699 "sql.c" break; case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */ -#line 131 "sql.y" { SStrToken token; tSetDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } -#line 2708 "sql.c" break; case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */ -#line 138 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } -#line 2716 "sql.c" break; case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */ -#line 144 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } -#line 2724 "sql.c" break; case 32: /* cmd ::= DROP DATABASE ifexists ids */ -#line 149 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } -#line 2729 "sql.c" break; case 33: /* cmd ::= DROP TOPIC ifexists ids */ -#line 150 "sql.y" { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } -#line 2734 "sql.c" break; case 34: /* cmd ::= DROP FUNCTION ids */ -#line 151 "sql.y" { setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); } -#line 2739 "sql.c" break; case 35: /* cmd ::= DROP DNODE ids */ -#line 153 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } -#line 2744 "sql.c" break; case 36: /* cmd ::= DROP USER ids */ -#line 154 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } -#line 2749 "sql.c" break; case 37: /* cmd ::= DROP ACCOUNT ids */ -#line 155 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } -#line 2754 "sql.c" break; case 38: /* cmd ::= USE ids */ -#line 158 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} 
-#line 2759 "sql.c" break; case 39: /* cmd ::= DESCRIBE ids cpxName */ -#line 161 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } -#line 2767 "sql.c" break; case 40: /* cmd ::= ALTER USER ids PASS ids */ -#line 167 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } -#line 2772 "sql.c" break; case 41: /* cmd ::= ALTER USER ids PRIVILEGE ids */ -#line 168 "sql.y" { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} -#line 2777 "sql.c" break; case 42: /* cmd ::= ALTER DNODE ids ids */ -#line 169 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2782 "sql.c" break; case 43: /* cmd ::= ALTER DNODE ids ids ids */ -#line 170 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2787 "sql.c" break; case 44: /* cmd ::= ALTER LOCAL ids */ -#line 171 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } -#line 2792 "sql.c" break; case 45: /* cmd ::= ALTER LOCAL ids ids */ -#line 172 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } -#line 2797 "sql.c" break; case 46: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 47: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==47); -#line 173 "sql.y" { SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy42, &t);} -#line 2803 "sql.c" break; case 48: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -#line 176 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy341);} -#line 2808 "sql.c" break; case 49: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -#line 177 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy341);} -#line 2813 "sql.c" break; case 50: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ -#line 181 "sql.y" { setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy131);} -#line 2818 "sql.c" break; case 51: /* ids ::= ID */ case 52: /* ids ::= STRING */ yytestcase(yyruleno==52); -#line 187 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 2824 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 53: /* ifexists ::= IF EXISTS */ -#line 191 "sql.y" { yymsp[-1].minor.yy0.n = 1;} -#line 2830 "sql.c" break; case 54: /* ifexists ::= */ case 56: /* ifnotexists ::= */ yytestcase(yyruleno==56); case 180: /* distinct ::= */ yytestcase(yyruleno==180); -#line 192 "sql.y" { yymsp[1].minor.yy0.n = 0;} -#line 2837 "sql.c" break; case 55: /* ifnotexists ::= IF NOT EXISTS */ -#line 195 "sql.y" { yymsp[-2].minor.yy0.n = 1;} -#line 2842 "sql.c" break; case 57: /* cmd ::= CREATE DNODE ids */ -#line 200 "sql.y" { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} -#line 2847 "sql.c" break; case 58: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -#line 202 "sql.y" { setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy341);} -#line 2852 "sql.c" break; case 59: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 60: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==60); -#line 203 "sql.y" { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, 
&yymsp[0].minor.yy42, &yymsp[-2].minor.yy0);} -#line 2858 "sql.c" break; case 61: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ -#line 205 "sql.y" { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy163, &yymsp[0].minor.yy0, 1);} -#line 2863 "sql.c" break; case 62: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ -#line 206 "sql.y" { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy163, &yymsp[0].minor.yy0, 2);} -#line 2868 "sql.c" break; case 63: /* cmd ::= CREATE USER ids PASS ids */ -#line 207 "sql.y" { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} -#line 2873 "sql.c" break; case 64: /* bufsize ::= */ case 66: /* pps ::= */ yytestcase(yyruleno==66); @@ -2881,9 +2748,7 @@ static YYACTIONTYPE yy_reduce( case 78: /* users ::= */ yytestcase(yyruleno==78); case 80: /* conns ::= */ yytestcase(yyruleno==80); case 82: /* state ::= */ yytestcase(yyruleno==82); -#line 209 "sql.y" { yymsp[1].minor.yy0.n = 0; } -#line 2887 "sql.c" break; case 65: /* bufsize ::= BUFSIZE INTEGER */ case 67: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==67); @@ -2895,12 +2760,9 @@ static YYACTIONTYPE yy_reduce( case 79: /* users ::= USERS INTEGER */ yytestcase(yyruleno==79); case 81: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==81); case 83: /* state ::= STATE ids */ yytestcase(yyruleno==83); -#line 210 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 2901 "sql.c" break; case 84: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ -#line 240 "sql.y" { yylhsminor.yy341.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; yylhsminor.yy341.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; @@ -2912,21 +2774,16 @@ static YYACTIONTYPE yy_reduce( yylhsminor.yy341.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; yylhsminor.yy341.stat = yymsp[0].minor.yy0; } -#line 2916 "sql.c" yymsp[-8].minor.yy341 = yylhsminor.yy341; break; case 85: /* intitemlist ::= intitemlist COMMA intitem */ case 154: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==154); -#line 256 "sql.y" { yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1); } -#line 2923 "sql.c" yymsp[-2].minor.yy131 = yylhsminor.yy131; break; case 86: /* intitemlist ::= intitem */ case 155: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==155); -#line 257 "sql.y" { yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1); } -#line 2930 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; case 87: /* intitem ::= INTEGER */ @@ -2934,15 +2791,11 @@ static YYACTIONTYPE yy_reduce( case 157: /* tagitem ::= FLOAT */ yytestcase(yyruleno==157); case 158: /* tagitem ::= STRING */ yytestcase(yyruleno==158); case 159: /* tagitem ::= BOOL */ yytestcase(yyruleno==159); -#line 259 "sql.y" { toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); } -#line 2940 "sql.c" yymsp[0].minor.yy516 = yylhsminor.yy516; break; case 88: /* keep ::= KEEP intitemlist */ -#line 263 "sql.y" { yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; } -#line 2946 "sql.c" break; case 89: /* cache ::= CACHE INTEGER */ case 90: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==90); @@ -2959,142 +2812,99 @@ static YYACTIONTYPE yy_reduce( case 101: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==101); case 
102: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==102); case 103: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==103); -#line 265 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 2965 "sql.c" break; case 104: /* db_optr ::= */ -#line 282 "sql.y" {setDefaultCreateDbOption(&yymsp[1].minor.yy42); yymsp[1].minor.yy42.dbType = TSDB_DB_TYPE_DEFAULT;} -#line 2970 "sql.c" break; case 105: /* db_optr ::= db_optr cache */ -#line 284 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 2975 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 106: /* db_optr ::= db_optr replica */ case 123: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==123); -#line 285 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 2982 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 107: /* db_optr ::= db_optr quorum */ case 124: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==124); -#line 286 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 2989 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 108: /* db_optr ::= db_optr days */ -#line 287 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 2995 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 109: /* db_optr ::= db_optr minrows */ -#line 288 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } -#line 3001 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 110: /* db_optr ::= db_optr maxrows */ -#line 289 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } -#line 3007 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 111: /* db_optr ::= db_optr blocks */ case 126: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==126); -#line 290 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3014 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 112: /* db_optr ::= db_optr ctime */ -#line 291 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3020 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 113: /* db_optr ::= db_optr wal */ -#line 292 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3026 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 114: /* db_optr ::= db_optr fsync */ -#line 293 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3032 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 115: /* db_optr ::= db_optr comp */ case 127: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==127); -#line 294 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3039 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 116: /* db_optr ::= db_optr prec */ -#line 295 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.precision = yymsp[0].minor.yy0; } 
-#line 3045 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 117: /* db_optr ::= db_optr keep */ case 125: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==125); -#line 296 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.keep = yymsp[0].minor.yy131; } -#line 3052 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 118: /* db_optr ::= db_optr update */ case 128: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==128); -#line 297 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3059 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 119: /* db_optr ::= db_optr cachelast */ case 129: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==129); -#line 298 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3066 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 120: /* topic_optr ::= db_optr */ case 130: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==130); -#line 302 "sql.y" { yylhsminor.yy42 = yymsp[0].minor.yy42; yylhsminor.yy42.dbType = TSDB_DB_TYPE_TOPIC; } -#line 3073 "sql.c" yymsp[0].minor.yy42 = yylhsminor.yy42; break; case 121: /* topic_optr ::= topic_optr partitions */ case 131: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==131); -#line 303 "sql.y" { yylhsminor.yy42 = yymsp[-1].minor.yy42; yylhsminor.yy42.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3080 "sql.c" yymsp[-1].minor.yy42 = yylhsminor.yy42; break; case 122: /* alter_db_optr ::= */ -#line 306 "sql.y" { setDefaultCreateDbOption(&yymsp[1].minor.yy42); yymsp[1].minor.yy42.dbType = TSDB_DB_TYPE_DEFAULT;} -#line 3086 "sql.c" break; case 132: /* typename ::= ids */ -#line 326 "sql.y" { yymsp[0].minor.yy0.type = 0; tSetColumnType (&yylhsminor.yy163, &yymsp[0].minor.yy0); } -#line 3094 "sql.c" yymsp[0].minor.yy163 = yylhsminor.yy163; break; case 133: /* typename ::= ids LP signed RP */ -#line 332 "sql.y" { if (yymsp[-1].minor.yy459 <= 0) { yymsp[-3].minor.yy0.type = 0; @@ -3104,42 +2914,30 @@ static YYACTIONTYPE yy_reduce( tSetColumnType(&yylhsminor.yy163, &yymsp[-3].minor.yy0); } } -#line 3108 "sql.c" yymsp[-3].minor.yy163 = yylhsminor.yy163; break; case 134: /* typename ::= ids UNSIGNED */ -#line 343 "sql.y" { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); tSetColumnType (&yylhsminor.yy163, &yymsp[-1].minor.yy0); } -#line 3118 "sql.c" yymsp[-1].minor.yy163 = yylhsminor.yy163; break; case 135: /* signed ::= INTEGER */ -#line 350 "sql.y" { yylhsminor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3124 "sql.c" yymsp[0].minor.yy459 = yylhsminor.yy459; break; case 136: /* signed ::= PLUS INTEGER */ -#line 351 "sql.y" { yymsp[-1].minor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } -#line 3130 "sql.c" break; case 137: /* signed ::= MINUS INTEGER */ -#line 352 "sql.y" { yymsp[-1].minor.yy459 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} -#line 3135 "sql.c" break; case 141: /* cmd ::= CREATE TABLE create_table_list */ -#line 358 "sql.y" { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy272;} -#line 3140 "sql.c" break; case 142: /* create_table_list ::= create_from_stable */ -#line 362 "sql.y" { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, 
sizeof(SCreatedTableInfo)); @@ -3148,20 +2946,16 @@ static YYACTIONTYPE yy_reduce( pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; yylhsminor.yy272 = pCreateTable; } -#line 3152 "sql.c" yymsp[0].minor.yy272 = yylhsminor.yy272; break; case 143: /* create_table_list ::= create_table_list create_from_stable */ -#line 371 "sql.y" { taosArrayPush(yymsp[-1].minor.yy272->childTableInfo, &yymsp[0].minor.yy96); yylhsminor.yy272 = yymsp[-1].minor.yy272; } -#line 3161 "sql.c" yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 144: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ -#line 377 "sql.y" { yylhsminor.yy272 = tSetCreateTableInfo(yymsp[-1].minor.yy131, NULL, NULL, TSQL_CREATE_TABLE); setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE); @@ -3169,11 +2963,9 @@ static YYACTIONTYPE yy_reduce( yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } -#line 3173 "sql.c" yymsp[-5].minor.yy272 = yylhsminor.yy272; break; case 145: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ -#line 387 "sql.y" { yylhsminor.yy272 = tSetCreateTableInfo(yymsp[-5].minor.yy131, yymsp[-1].minor.yy131, NULL, TSQL_CREATE_STABLE); setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE); @@ -3181,43 +2973,33 @@ static YYACTIONTYPE yy_reduce( yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } -#line 3185 "sql.c" yymsp[-9].minor.yy272 = yylhsminor.yy272; break; case 146: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ -#line 398 "sql.y" { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yylhsminor.yy96 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy131, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } -#line 3195 "sql.c" yymsp[-9].minor.yy96 = yylhsminor.yy96; break; case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ -#line 404 "sql.y" { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; yylhsminor.yy96 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy131, yymsp[-1].minor.yy131, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } -#line 3205 "sql.c" yymsp[-12].minor.yy96 = yylhsminor.yy96; break; case 148: /* tagNamelist ::= tagNamelist COMMA ids */ -#line 412 "sql.y" {taosArrayPush(yymsp[-2].minor.yy131, &yymsp[0].minor.yy0); yylhsminor.yy131 = yymsp[-2].minor.yy131; } -#line 3211 "sql.c" yymsp[-2].minor.yy131 = yylhsminor.yy131; break; case 149: /* tagNamelist ::= ids */ -#line 413 "sql.y" {yylhsminor.yy131 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy131, &yymsp[0].minor.yy0);} -#line 3217 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; case 150: /* create_table_args ::= ifnotexists ids cpxName AS select */ -#line 417 "sql.y" { yylhsminor.yy272 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy256, TSQL_CREATE_STREAM); setSqlInfo(pInfo, yylhsminor.yy272, NULL, TSDB_SQL_CREATE_TABLE); @@ -3225,254 +3007,178 @@ static YYACTIONTYPE yy_reduce( yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } -#line 3229 "sql.c" yymsp[-4].minor.yy272 = yylhsminor.yy272; break; case 151: /* columnlist ::= columnlist COMMA column */ -#line 428 "sql.y" 
{taosArrayPush(yymsp[-2].minor.yy131, &yymsp[0].minor.yy163); yylhsminor.yy131 = yymsp[-2].minor.yy131; } -#line 3235 "sql.c" yymsp[-2].minor.yy131 = yylhsminor.yy131; break; case 152: /* columnlist ::= column */ -#line 429 "sql.y" {yylhsminor.yy131 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy131, &yymsp[0].minor.yy163);} -#line 3241 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; case 153: /* column ::= ids typename */ -#line 433 "sql.y" { tSetColumnInfo(&yylhsminor.yy163, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy163); } -#line 3249 "sql.c" yymsp[-1].minor.yy163 = yylhsminor.yy163; break; case 160: /* tagitem ::= NULL */ -#line 448 "sql.y" { yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); } -#line 3255 "sql.c" yymsp[0].minor.yy516 = yylhsminor.yy516; break; case 161: /* tagitem ::= NOW */ -#line 449 "sql.y" { yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0);} -#line 3261 "sql.c" yymsp[0].minor.yy516 = yylhsminor.yy516; break; case 162: /* tagitem ::= MINUS INTEGER */ case 163: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==163); case 164: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==164); case 165: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==165); -#line 451 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0); } -#line 3275 "sql.c" yymsp[-1].minor.yy516 = yylhsminor.yy516; break; case 166: /* select ::= SELECT selcollist from where_opt interval_opt sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ -#line 482 "sql.y" { yylhsminor.yy256 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy131, yymsp[-11].minor.yy544, yymsp[-10].minor.yy46, yymsp[-4].minor.yy131, yymsp[-2].minor.yy131, &yymsp[-9].minor.yy530, &yymsp[-7].minor.yy39, &yymsp[-6].minor.yy538, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy131, &yymsp[0].minor.yy284, &yymsp[-1].minor.yy284, yymsp[-3].minor.yy46); } -#line 3283 "sql.c" yymsp[-13].minor.yy256 = yylhsminor.yy256; break; case 167: /* select ::= LP select RP */ -#line 486 "sql.y" {yymsp[-2].minor.yy256 = yymsp[-1].minor.yy256;} -#line 3289 "sql.c" break; case 168: /* union ::= select */ -#line 490 "sql.y" { yylhsminor.yy131 = setSubclause(NULL, yymsp[0].minor.yy256); } -#line 3294 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; case 169: /* union ::= union UNION ALL select */ -#line 491 "sql.y" { yylhsminor.yy131 = appendSelectClause(yymsp[-3].minor.yy131, yymsp[0].minor.yy256); } -#line 3300 "sql.c" yymsp[-3].minor.yy131 = yylhsminor.yy131; break; case 170: /* cmd ::= union */ -#line 493 "sql.y" { setSqlInfo(pInfo, yymsp[0].minor.yy131, NULL, TSDB_SQL_SELECT); } -#line 3306 "sql.c" break; case 171: /* select ::= SELECT selcollist */ -#line 500 "sql.y" { yylhsminor.yy256 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy131, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } -#line 3313 "sql.c" yymsp[-1].minor.yy256 = yylhsminor.yy256; break; case 172: /* sclp ::= selcollist COMMA */ -#line 512 "sql.y" {yylhsminor.yy131 = yymsp[-1].minor.yy131;} -#line 3319 "sql.c" yymsp[-1].minor.yy131 = yylhsminor.yy131; break; case 173: /* sclp ::= */ case 203: /* orderby_opt ::= */ yytestcase(yyruleno==203); -#line 513 "sql.y" {yymsp[1].minor.yy131 = 0;} -#line 3326 "sql.c" break; case 174: 
/* selcollist ::= sclp distinct expr as */ -#line 514 "sql.y" { yylhsminor.yy131 = tSqlExprListAppend(yymsp[-3].minor.yy131, yymsp[-1].minor.yy46, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } -#line 3333 "sql.c" yymsp[-3].minor.yy131 = yylhsminor.yy131; break; case 175: /* selcollist ::= sclp STAR */ -#line 518 "sql.y" { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); yylhsminor.yy131 = tSqlExprListAppend(yymsp[-1].minor.yy131, pNode, 0, 0); } -#line 3342 "sql.c" yymsp[-1].minor.yy131 = yylhsminor.yy131; break; case 176: /* as ::= AS ids */ -#line 526 "sql.y" { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } -#line 3348 "sql.c" break; case 177: /* as ::= ids */ -#line 527 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 3353 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 178: /* as ::= */ -#line 528 "sql.y" { yymsp[1].minor.yy0.n = 0; } -#line 3359 "sql.c" break; case 179: /* distinct ::= DISTINCT */ -#line 531 "sql.y" { yylhsminor.yy0 = yymsp[0].minor.yy0; } -#line 3364 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 181: /* from ::= FROM tablelist */ case 182: /* from ::= FROM sub */ yytestcase(yyruleno==182); -#line 537 "sql.y" {yymsp[-1].minor.yy544 = yymsp[0].minor.yy544;} -#line 3371 "sql.c" break; case 183: /* sub ::= LP union RP */ -#line 542 "sql.y" {yymsp[-2].minor.yy544 = addSubqueryElem(NULL, yymsp[-1].minor.yy131, NULL);} -#line 3376 "sql.c" break; case 184: /* sub ::= LP union RP ids */ -#line 543 "sql.y" {yymsp[-3].minor.yy544 = addSubqueryElem(NULL, yymsp[-2].minor.yy131, &yymsp[0].minor.yy0);} -#line 3381 "sql.c" break; case 185: /* sub ::= sub COMMA LP union RP ids */ -#line 544 "sql.y" {yylhsminor.yy544 = addSubqueryElem(yymsp[-5].minor.yy544, yymsp[-2].minor.yy131, &yymsp[0].minor.yy0);} -#line 3386 "sql.c" yymsp[-5].minor.yy544 = yylhsminor.yy544; break; case 186: /* tablelist ::= ids cpxName */ -#line 548 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy544 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } -#line 3395 "sql.c" yymsp[-1].minor.yy544 = yylhsminor.yy544; break; case 187: /* tablelist ::= ids cpxName ids */ -#line 553 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy544 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } -#line 3404 "sql.c" yymsp[-2].minor.yy544 = yylhsminor.yy544; break; case 188: /* tablelist ::= tablelist COMMA ids cpxName */ -#line 558 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yylhsminor.yy544 = setTableNameList(yymsp[-3].minor.yy544, &yymsp[-1].minor.yy0, NULL); } -#line 3413 "sql.c" yymsp[-3].minor.yy544 = yylhsminor.yy544; break; case 189: /* tablelist ::= tablelist COMMA ids cpxName ids */ -#line 563 "sql.y" { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; yylhsminor.yy544 = setTableNameList(yymsp[-4].minor.yy544, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } -#line 3422 "sql.c" yymsp[-4].minor.yy544 = yylhsminor.yy544; break; case 190: /* tmvar ::= VARIABLE */ -#line 570 "sql.y" {yylhsminor.yy0 = yymsp[0].minor.yy0;} -#line 3428 "sql.c" yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 191: /* interval_opt ::= INTERVAL LP tmvar RP */ -#line 573 "sql.y" {yymsp[-3].minor.yy530.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy530.offset.n = 0;} -#line 3434 "sql.c" break; case 192: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -#line 574 "sql.y" {yymsp[-5].minor.yy530.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy530.offset = yymsp[-1].minor.yy0;} -#line 3439 
"sql.c" break; case 193: /* interval_opt ::= */ -#line 575 "sql.y" {memset(&yymsp[1].minor.yy530, 0, sizeof(yymsp[1].minor.yy530));} -#line 3444 "sql.c" break; case 194: /* session_option ::= */ -#line 578 "sql.y" {yymsp[1].minor.yy39.col.n = 0; yymsp[1].minor.yy39.gap.n = 0;} -#line 3449 "sql.c" break; case 195: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ -#line 579 "sql.y" { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; yymsp[-6].minor.yy39.col = yymsp[-4].minor.yy0; yymsp[-6].minor.yy39.gap = yymsp[-1].minor.yy0; } -#line 3458 "sql.c" break; case 196: /* windowstate_option ::= */ -#line 585 "sql.y" { yymsp[1].minor.yy538.col.n = 0; yymsp[1].minor.yy538.col.z = NULL;} -#line 3463 "sql.c" break; case 197: /* windowstate_option ::= STATE_WINDOW LP ids RP */ -#line 586 "sql.y" { yymsp[-3].minor.yy538.col = yymsp[-1].minor.yy0; } -#line 3468 "sql.c" break; case 198: /* fill_opt ::= */ -#line 590 "sql.y" { yymsp[1].minor.yy131 = 0; } -#line 3473 "sql.c" break; case 199: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ -#line 591 "sql.y" { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); @@ -3481,390 +3187,268 @@ static YYACTIONTYPE yy_reduce( tVariantListInsert(yymsp[-1].minor.yy131, &A, -1, 0); yymsp[-5].minor.yy131 = yymsp[-1].minor.yy131; } -#line 3485 "sql.c" break; case 200: /* fill_opt ::= FILL LP ID RP */ -#line 600 "sql.y" { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-3].minor.yy131 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } -#line 3493 "sql.c" break; case 201: /* sliding_opt ::= SLIDING LP tmvar RP */ -#line 606 "sql.y" {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } -#line 3498 "sql.c" break; case 202: /* sliding_opt ::= */ -#line 607 "sql.y" {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } -#line 3503 "sql.c" break; case 204: /* orderby_opt ::= ORDER BY sortlist */ -#line 619 "sql.y" {yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;} -#line 3508 "sql.c" break; case 205: /* sortlist ::= sortlist COMMA item sortorder */ -#line 621 "sql.y" { yylhsminor.yy131 = tVariantListAppend(yymsp[-3].minor.yy131, &yymsp[-1].minor.yy516, yymsp[0].minor.yy43); } -#line 3515 "sql.c" yymsp[-3].minor.yy131 = yylhsminor.yy131; break; case 206: /* sortlist ::= item sortorder */ -#line 625 "sql.y" { yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[-1].minor.yy516, yymsp[0].minor.yy43); } -#line 3523 "sql.c" yymsp[-1].minor.yy131 = yylhsminor.yy131; break; case 207: /* item ::= ids cpxName */ -#line 630 "sql.y" { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0); } -#line 3534 "sql.c" yymsp[-1].minor.yy516 = yylhsminor.yy516; break; case 208: /* sortorder ::= ASC */ -#line 638 "sql.y" { yymsp[0].minor.yy43 = TSDB_ORDER_ASC; } -#line 3540 "sql.c" break; case 209: /* sortorder ::= DESC */ -#line 639 "sql.y" { yymsp[0].minor.yy43 = TSDB_ORDER_DESC;} -#line 3545 "sql.c" break; case 210: /* sortorder ::= */ -#line 640 "sql.y" { yymsp[1].minor.yy43 = TSDB_ORDER_ASC; } -#line 3550 "sql.c" break; case 211: /* groupby_opt ::= */ -#line 648 "sql.y" { yymsp[1].minor.yy131 = 0;} -#line 3555 "sql.c" break; case 212: /* groupby_opt ::= GROUP BY grouplist */ -#line 649 "sql.y" { yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;} -#line 3560 "sql.c" break; case 213: /* grouplist ::= grouplist COMMA item */ -#line 651 "sql.y" { yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1); } -#line 3567 "sql.c" 
yymsp[-2].minor.yy131 = yylhsminor.yy131; break; case 214: /* grouplist ::= item */ -#line 655 "sql.y" { yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1); } -#line 3575 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; case 215: /* having_opt ::= */ case 225: /* where_opt ::= */ yytestcase(yyruleno==225); - case 267: /* expritem ::= */ yytestcase(yyruleno==267); -#line 662 "sql.y" + case 268: /* expritem ::= */ yytestcase(yyruleno==268); {yymsp[1].minor.yy46 = 0;} -#line 3583 "sql.c" break; case 216: /* having_opt ::= HAVING expr */ case 226: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==226); -#line 663 "sql.y" {yymsp[-1].minor.yy46 = yymsp[0].minor.yy46;} -#line 3589 "sql.c" break; case 217: /* limit_opt ::= */ case 221: /* slimit_opt ::= */ yytestcase(yyruleno==221); -#line 667 "sql.y" {yymsp[1].minor.yy284.limit = -1; yymsp[1].minor.yy284.offset = 0;} -#line 3595 "sql.c" break; case 218: /* limit_opt ::= LIMIT signed */ case 222: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==222); -#line 668 "sql.y" {yymsp[-1].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-1].minor.yy284.offset = 0;} -#line 3601 "sql.c" break; case 219: /* limit_opt ::= LIMIT signed OFFSET signed */ -#line 670 "sql.y" { yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;} -#line 3606 "sql.c" break; case 220: /* limit_opt ::= LIMIT signed COMMA signed */ -#line 672 "sql.y" { yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;} -#line 3611 "sql.c" break; case 223: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -#line 678 "sql.y" {yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;} -#line 3616 "sql.c" break; case 224: /* slimit_opt ::= SLIMIT signed COMMA signed */ -#line 680 "sql.y" {yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;} -#line 3621 "sql.c" break; case 227: /* expr ::= LP expr RP */ -#line 693 "sql.y" {yylhsminor.yy46 = yymsp[-1].minor.yy46; yylhsminor.yy46->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy46->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} -#line 3626 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 228: /* expr ::= ID */ -#line 695 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} -#line 3632 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 229: /* expr ::= ID DOT ID */ -#line 696 "sql.y" { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} -#line 3638 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 230: /* expr ::= ID DOT STAR */ -#line 697 "sql.y" { yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} -#line 3644 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 231: /* expr ::= INTEGER */ -#line 699 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} -#line 3650 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 232: /* expr ::= MINUS INTEGER */ case 233: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==233); -#line 700 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} -#line 3657 "sql.c" yymsp[-1].minor.yy46 = yylhsminor.yy46; break; 
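[Note: in this regenerated parser, the new MATCH token is wired through exactly like LIKE — a fallback to ID, a token-name entry, and rule 263 "expr ::= expr MATCH expr", whose reduce action (further below) just builds a TK_MATCH expression node with tSqlExprCreate. The regex itself is only evaluated later, at filter time. The following is a minimal, self-contained sketch of that evaluation model, assuming POSIX extended regular expressions; the helper name regex_match and the REG_EXTENDED flag are illustrative, not the engine's exact code.

#include <regex.h>
#include <stdio.h>

/* Return 1 if value matches pattern, 0 if not, -1 if the pattern is invalid. */
static int regex_match(const char *value, const char *pattern) {
  regex_t re;
  if (regcomp(&re, pattern, REG_EXTENDED) != 0) {
    return -1;                /* pattern failed to compile */
  }
  int rc = regexec(&re, value, 0, NULL, 0);
  regfree(&re);               /* always release the compiled pattern */
  return (rc == 0) ? 1 : 0;
}

int main(void) {
  /* e.g. rows whose tag passes a filter like: tag_col MATCH '^bj_.*' */
  printf("%d\n", regex_match("bj_chaoyang", "^bj_.*")); /* prints 1 */
  printf("%d\n", regex_match("sh_pudong",   "^bj_.*")); /* prints 0 */
  return 0;
}

End of note.]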
case 234: /* expr ::= FLOAT */ -#line 702 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} -#line 3663 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 235: /* expr ::= MINUS FLOAT */ case 236: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==236); -#line 703 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} -#line 3670 "sql.c" yymsp[-1].minor.yy46 = yylhsminor.yy46; break; case 237: /* expr ::= STRING */ -#line 705 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} -#line 3676 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 238: /* expr ::= NOW */ -#line 706 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } -#line 3682 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 239: /* expr ::= VARIABLE */ -#line 707 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} -#line 3688 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 240: /* expr ::= PLUS VARIABLE */ case 241: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==241); -#line 708 "sql.y" { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} -#line 3695 "sql.c" yymsp[-1].minor.yy46 = yylhsminor.yy46; break; case 242: /* expr ::= BOOL */ -#line 710 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} -#line 3701 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 243: /* expr ::= NULL */ -#line 711 "sql.y" { yylhsminor.yy46 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} -#line 3707 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; case 244: /* expr ::= ID LP exprlist RP */ -#line 714 "sql.y" { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy46 = tSqlExprCreateFunction(yymsp[-1].minor.yy131, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } -#line 3713 "sql.c" yymsp[-3].minor.yy46 = yylhsminor.yy46; break; case 245: /* expr ::= ID LP STAR RP */ -#line 717 "sql.y" { tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy46 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } -#line 3719 "sql.c" yymsp[-3].minor.yy46 = yylhsminor.yy46; break; case 246: /* expr ::= expr IS NULL */ -#line 720 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, NULL, TK_ISNULL);} -#line 3725 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 247: /* expr ::= expr IS NOT NULL */ -#line 721 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-3].minor.yy46, NULL, TK_NOTNULL);} -#line 3731 "sql.c" yymsp[-3].minor.yy46 = yylhsminor.yy46; break; case 248: /* expr ::= expr LT expr */ -#line 724 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LT);} -#line 3737 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 249: /* expr ::= expr GT expr */ -#line 725 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_GT);} -#line 3743 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 250: /* expr ::= expr LE expr */ -#line 726 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LE);} -#line 3749 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 251: /* expr ::= expr GE expr */ -#line 727 
"sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_GE);} -#line 3755 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 252: /* expr ::= expr NE expr */ -#line 728 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_NE);} -#line 3761 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 253: /* expr ::= expr EQ expr */ -#line 729 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_EQ);} -#line 3767 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 254: /* expr ::= expr BETWEEN expr AND expr */ -#line 731 "sql.y" { tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy46); yylhsminor.yy46 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy46, yymsp[-2].minor.yy46, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy46, TK_LE), TK_AND);} -#line 3773 "sql.c" yymsp[-4].minor.yy46 = yylhsminor.yy46; break; case 255: /* expr ::= expr AND expr */ -#line 733 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_AND);} -#line 3779 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 256: /* expr ::= expr OR expr */ -#line 734 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_OR); } -#line 3785 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 257: /* expr ::= expr PLUS expr */ -#line 737 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_PLUS); } -#line 3791 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 258: /* expr ::= expr MINUS expr */ -#line 738 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_MINUS); } -#line 3797 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 259: /* expr ::= expr STAR expr */ -#line 739 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_STAR); } -#line 3803 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 260: /* expr ::= expr SLASH expr */ -#line 740 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_DIVIDE);} -#line 3809 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 261: /* expr ::= expr REM expr */ -#line 741 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_REM); } -#line 3815 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; case 262: /* expr ::= expr LIKE expr */ -#line 744 "sql.y" {yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_LIKE); } -#line 3821 "sql.c" yymsp[-2].minor.yy46 = yylhsminor.yy46; break; - case 263: /* expr ::= expr IN LP exprlist RP */ -#line 747 "sql.y" + case 263: /* expr ::= expr MATCH expr */ +{yylhsminor.yy46 = tSqlExprCreate(yymsp[-2].minor.yy46, yymsp[0].minor.yy46, TK_MATCH); } + yymsp[-2].minor.yy46 = yylhsminor.yy46; + break; + case 264: /* expr ::= expr IN LP exprlist RP */ {yylhsminor.yy46 = tSqlExprCreate(yymsp[-4].minor.yy46, (tSqlExpr*)yymsp[-1].minor.yy131, TK_IN); } -#line 3827 "sql.c" yymsp[-4].minor.yy46 = yylhsminor.yy46; break; - case 264: /* exprlist ::= exprlist COMMA expritem */ -#line 755 "sql.y" + case 265: /* exprlist ::= exprlist COMMA expritem */ {yylhsminor.yy131 = tSqlExprListAppend(yymsp[-2].minor.yy131,yymsp[0].minor.yy46,0, 0);} -#line 3833 "sql.c" yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 265: /* exprlist ::= expritem */ -#line 756 "sql.y" + case 266: /* exprlist ::= expritem */ {yylhsminor.yy131 = 
tSqlExprListAppend(0,yymsp[0].minor.yy46,0, 0);} -#line 3839 "sql.c" yymsp[0].minor.yy131 = yylhsminor.yy131; break; - case 266: /* expritem ::= expr */ -#line 757 "sql.y" + case 267: /* expritem ::= expr */ {yylhsminor.yy46 = yymsp[0].minor.yy46;} -#line 3845 "sql.c" yymsp[0].minor.yy46 = yylhsminor.yy46; break; - case 268: /* cmd ::= RESET QUERY CACHE */ -#line 761 "sql.y" + case 269: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} -#line 3851 "sql.c" break; - case 269: /* cmd ::= SYNCDB ids REPLICA */ -#line 764 "sql.y" + case 270: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} -#line 3856 "sql.c" break; - case 270: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ -#line 767 "sql.y" + case 271: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3865 "sql.c" break; - case 271: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ -#line 773 "sql.y" + case 272: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3874,28 +3458,22 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3878 "sql.c" break; - case 272: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ -#line 783 "sql.y" + case 273: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3887 "sql.c" break; - case 273: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ -#line 790 "sql.y" + case 274: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3896 "sql.c" break; - case 274: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ -#line 795 "sql.y" + case 275: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3905,10 +3483,8 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3909 "sql.c" break; - case 275: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ -#line 805 "sql.y" + case 276: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3921,10 +3497,8 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3925 "sql.c" break; - case 276: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ -#line 818 "sql.y" + case 277: /* cmd ::= ALTER TABLE ids 
cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3935,28 +3509,22 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3939 "sql.c" break; - case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ -#line 829 "sql.y" + case 278: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3948 "sql.c" break; - case 278: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ -#line 836 "sql.y" + case 279: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3957 "sql.c" break; - case 279: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ -#line 842 "sql.y" + case 280: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3966,28 +3534,22 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3970 "sql.c" break; - case 280: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ -#line 852 "sql.y" + case 281: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3979 "sql.c" break; - case 281: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ -#line 859 "sql.y" + case 282: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 3988 "sql.c" break; - case 282: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ -#line 864 "sql.y" + case 283: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3997,10 +3559,8 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4001 "sql.c" break; - case 283: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ -#line 874 "sql.y" + case 284: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -4013,10 +3573,8 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, 
TSDB_SQL_ALTER_TABLE); } -#line 4017 "sql.c" break; - case 284: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ -#line 887 "sql.y" + case 285: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -4027,31 +3585,22 @@ static YYACTIONTYPE yy_reduce( SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4031 "sql.c" break; - case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ -#line 898 "sql.y" + case 286: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -#line 4040 "sql.c" break; - case 286: /* cmd ::= KILL CONNECTION INTEGER */ -#line 905 "sql.y" + case 287: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} -#line 4045 "sql.c" break; - case 287: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ -#line 906 "sql.y" + case 288: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} -#line 4050 "sql.c" break; - case 288: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ -#line 907 "sql.y" + case 289: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} -#line 4055 "sql.c" break; default: break; @@ -4113,7 +3662,6 @@ static void yy_syntax_error( ParseCTX_FETCH #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ -#line 37 "sql.y" pInfo->valid = false; int32_t outputBufLen = tListLen(pInfo->msg); @@ -4136,7 +3684,6 @@ static void yy_syntax_error( } assert(len <= outputBufLen); -#line 4140 "sql.c" /************ End %syntax_error code ******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ ParseCTX_STORE @@ -4162,8 +3709,7 @@ static void yy_accept( /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ -#line 61 "sql.y" -#line 4167 "sql.c" + /*********** End %parse_accept code *******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ ParseCTX_STORE diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9cc9b7224c..bd3b43c1aa 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3708,6 +3708,9 @@ static bool tableFilterFp(const void* pNode, void* param) { case TSDB_RELATION_LIKE: { return ret == 0; } + case TSDB_RELATION_MATCH: { + return ret == 0; + } case TSDB_RELATION_IN: { return ret == 1; } @@ -4041,6 +4044,8 @@ static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) { pCond->start->v = queryColInfo->q; } else if (optr == TSDB_RELATION_LIKE) { assert(0); + } else if (optr == TSDB_RELATION_MATCH) { + assert(0); } return TSDB_CODE_SUCCESS; @@ -4198,7 +4203,7 @@ static void queryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, S if 
(pQueryInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
 if (pQueryInfo->optr == TSDB_RELATION_IN) {
 addToResult = pQueryInfo->compare(name, pQueryInfo->q);
- } else if (pQueryInfo->optr == TSDB_RELATION_LIKE) {
+ } else if (pQueryInfo->optr == TSDB_RELATION_LIKE || pQueryInfo->optr == TSDB_RELATION_MATCH) {
 addToResult = !pQueryInfo->compare(name, pQueryInfo->q);
 }
 } else {
@@ -4230,7 +4235,8 @@ void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *re
 param->setupInfoFn(pExpr, param->pExtInfo);
 tQueryInfo *pQueryInfo = pExpr->_node.info;
- if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_IN)) {
+ if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_MATCH
+ && pQueryInfo->optr != TSDB_RELATION_IN)) {
 queryIndexedColumn(pSkipList, pQueryInfo, result);
 } else {
 queryIndexlessColumn(pSkipList, pQueryInfo, result, param->nodeFilterFn);
diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h
index d1760ab28c..7c2cbd4317 100644
--- a/src/util/inc/tcompare.h
+++ b/src/util/inc/tcompare.h
@@ -26,6 +26,7 @@ extern "C" {
 #define TSDB_PATTERN_NOMATCH 1
 #define TSDB_PATTERN_NOWILDCARDMATCH 2
 #define TSDB_PATTERN_STRING_MAX_LEN 100
+#define TSDB_REGEX_STRING_MAX_LEN 128
 #define FLT_COMPAR_TOL_FACTOR 4
 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))
@@ -82,6 +83,7 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight);
 int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight);
 int32_t compareStrPatternComp(const void* pLeft, const void* pRight);
+int32_t compareStrRegexComp(const void* pLeft, const void* pRight);
 int32_t compareFindItemInSet(const void *pLeft, const void* pRight);
 int32_t compareWStrPatternComp(const void* pLeft, const void* pRight);
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 36480418c9..956943c0b6 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -13,10 +13,11 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
+#include "tcompare.h"
+#include <regex.h>
+#include "hash.h"
 #include "os.h"
 #include "ttype.h"
-#include "tcompare.h"
-#include "hash.h"
 int32_t setCompareBytes1(const void *pLeft, const void *pRight) {
 return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0;
 }
@@ -343,6 +344,43 @@ int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
 return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
 }
+int32_t compareStrRegexComp(const void* pLeft, const void* pRight) {
+ size_t sz = varDataLen(pRight);
+ char *pattern = malloc(sz + 1);
+ memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
+ pattern[sz] = 0;
+
+ sz = varDataLen(pLeft);
+ char *str = malloc(sz + 1);
+ memcpy(str, varDataVal(pLeft), sz);
+ str[sz] = 0;
+
+ int errCode = 0;
+ regex_t regex;
+ char msgbuf[256] = {0};
+
+ int cflags = REG_EXTENDED | REG_ICASE;
+ if ((errCode = regcomp(&regex, pattern, cflags)) != 0) {
+ regerror(errCode, &regex, msgbuf, sizeof(msgbuf));
+ uError("Failed to compile regex pattern %s. reason %s", pattern, msgbuf);
+ regfree(&regex);
+ free(str);
+ free(pattern);
+ return 1;
+ }
+
+ errCode = regexec(&regex, str, 0, NULL, 0);
+ if (errCode != 0 && errCode != REG_NOMATCH) {
+ regerror(errCode, &regex, msgbuf, sizeof(msgbuf));
+ uError("Failed to match %s with pattern %s, reason %s", str, pattern, msgbuf)
+ }
+ int32_t result = (errCode == 0) ? 0 : 1;
+ regfree(&regex);
+ free(str);
+ free(pattern);
+ return result;
+}
+
 int32_t taosArrayCompareString(const void* a, const void* b) {
 const char* x = *(const char**)a;
 const char* y = *(const char**)b;
@@ -403,7 +441,9 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
 case TSDB_DATA_TYPE_FLOAT: comparFn = compareFloatVal; break;
 case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break;
 case TSDB_DATA_TYPE_BINARY: {
- if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = compareStrRegexComp;
+ } else if (optr == TSDB_RELATION_LIKE) { /* wildcard query using like operator */
 comparFn = compareStrPatternComp;
 } else if (optr == TSDB_RELATION_IN) {
 comparFn = compareFindItemInSet;
@@ -415,7 +455,9 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
 case TSDB_DATA_TYPE_NCHAR: {
- if (optr == TSDB_RELATION_LIKE) {
+ if (optr == TSDB_RELATION_MATCH) {
+ comparFn = compareStrRegexComp;
+ } else if (optr == TSDB_RELATION_LIKE) {
 comparFn = compareWStrPatternComp;
 } else if (optr == TSDB_RELATION_IN) {
 comparFn = compareFindItemInSet;
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index c4d05b2d5a..7b1134a7ee 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -53,6 +53,7 @@ static SKeyword keywordTable[] = {
 {"NOTNULL", TK_NOTNULL},
 {"IS", TK_IS},
 {"LIKE", TK_LIKE},
+ {"MATCH", TK_MATCH},
 {"GLOB", TK_GLOB},
 {"BETWEEN", TK_BETWEEN},
 {"IN", TK_IN},
From d7227bb5b41ee3b42ceb278ed97856e417b2598a Mon Sep 17 00:00:00 2001
From: shenglian zhou
Date: Tue, 24 Aug 2021 08:50:15 +0800
Subject: [PATCH 02/71] fix windows compilation and test
---
 src/util/src/tcompare.c | 7 ++++++-
 tests/script/general/parser/where.sim | 10 +++++-----
 2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 956943c0b6..36ba95e309 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -12,10 +12,15 @@
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
+#define _BSD_SOURCE
+#define _GNU_SOURCE
+#define _XOPEN_SOURCE
+#define _DEFAULT_SOURCE
 #include "tcompare.h"
-#include <regex.h>
+#include "tulog.h"
 #include "hash.h"
+#include "regex.h"
 #include "os.h"
 #include "ttype.h"
diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim
index 6b789de490..0f9e317b40 100644
--- a/tests/script/general/parser/where.sim
+++ b/tests/script/general/parser/where.sim
@@ -95,15 +95,15 @@ if $rows != 2 then
 return -1
 endi
-print $tbPrefix
+print $tbPrefix, $data00 $data10
 $tb = $tbPrefix . 0
-if $data00 != wh_tb1 then
- print expect wh_tb1, actual:$data00
+if $data00 != $tb then
+ print expect $tb, actual:$data00
 return -1
 endi
 $tb = $tbPrefix .
1
-if $data10 != wh_tb0 then
- print expect wh_tb0, actual:$data00
+if $data10 != $tb then
+ print expect $tb, actual:$data10
 return -1
 endi
From b48c2c9c108f15e43822af9b9d48c648ebd1bdf8 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Fri, 27 Aug 2021 15:03:42 +0800
Subject: [PATCH 03/71] [TD-6361] restrict UDF func name
---
 src/client/src/tscSQLParser.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 3a0d59b6c2..0b433b3f8f 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -432,7 +432,7 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
 int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
- const char *msg1 = "function name is too long";
+ const char *msg1 = "invalid function name";
 const char *msg2 = "path is too long";
 const char *msg3 = "invalid outputtype";
 const char *msg4 = "invalid script";
@@ -449,7 +449,10 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 }
 createInfo->name.z[createInfo->name.n] = 0;
-
+ // funcname's naming rule is the same as a column's
+ if (!validateColumnName(createInfo->name.z)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
+ }
 strdequote(createInfo->name.z);
 if (strlen(createInfo->name.z) >= TSDB_FUNC_NAME_LEN) {
From cc2c54dd115807784b8babde4c6bea1025bfd2a0 Mon Sep 17 00:00:00 2001
From: yihaoDeng
Date: Fri, 27 Aug 2021 18:00:50 +0800
Subject: [PATCH 04/71] [TD-6361] restrict UDF func name
---
 src/client/src/tscSQLParser.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 0b433b3f8f..71d2984f0f 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -432,7 +432,7 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
 int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
- const char *msg1 = "invalid function name";
+ const char *msg1 = "invalid function name";
 const char *msg2 = "path is too long";
 const char *msg3 = "invalid outputtype";
 const char *msg4 = "invalid script";
@@ -450,7 +450,7 @@ int32_t handleUserDefinedFunc(SSqlObj* pSql, struct SSqlInfo* pInfo) {
 createInfo->name.z[createInfo->name.n] = 0;
 // funcname's naming rule is the same as a column's
- if (!validateColumnName(createInfo->name.z)) {
+ if (validateColumnName(createInfo->name.z) != TSDB_CODE_SUCCESS) {
 return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
 }
 strdequote(createInfo->name.z);
From e0805bdda1003ac3d9cd9b4027bb480aafcc0682 Mon Sep 17 00:00:00 2001
From: jiajingbin
Date: Sun, 29 Aug 2021 09:14:50 +0000
Subject: [PATCH 05/71] [TD-5929] create testcases for TD-5929 ---> diff cols support or
---
 tests/pytest/fulltest.sh | 2 +-
 tests/pytest/query/queryDiffColsOr.py | 95 ++++++++++++++++++---------
 2 files changed, 65 insertions(+), 32 deletions(-)
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 886e2a365e..06ec3c6bfa 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -390,7 +390,7 @@ python3 ./test.py -f alter/alterColMultiTimes.py
 python3 ./test.py -f query/queryWildcardLength.py
 python3 ./test.py -f query/queryTbnameUpperLower.py
 python3 ./test.py -f query/query.py
-
+python3 ./test.py -f query/queryDiffColsOr.py
 #======================p4-end===============
diff --git a/tests/pytest/query/queryDiffColsOr.py b/tests/pytest/query/queryDiffColsOr.py
index feeab84a7e..e9e791da9f 100644
---
a/tests/pytest/query/queryDiffColsOr.py +++ b/tests/pytest/query/queryDiffColsOr.py @@ -10,13 +10,10 @@ ################################################################### # -*- coding: utf-8 -*- -from copy import deepcopy from util.log import tdLog from util.cases import tdCases from util.sql import tdSql from util.common import tdCom - - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -409,6 +406,62 @@ class TDTestCase: tdSql.checkRows(10) tdSql.checkEqual(int(res[9][0]), 10) + def queryMultiTbWithTag(self, tb_name): + # tags (1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)') + + tdSql.execute( + f'CREATE TABLE {tb_name}_sub2 using {tb_name} tags (2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)') + tdSql.execute( + f'CREATE TABLE {tb_name}_sub3 using {tb_name} tags (3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)') + tdSql.execute( + f'insert into {tb_name}_sub2 values ("2021-01-25 12:00:00", 2, 2, 2, 4, 2.2, 2.2, "binary2", "nchar2", true, 12)') + tdSql.execute( + f'insert into {tb_name}_sub3 values ("2021-01-27 12:00:00", 3, 3, 3, 3, 3.3, 3.3, "binary3", "nchar3", true, 13)') + ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or condition_tag_C or condition_tag_D or like and in interval + query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)' + res = tdSql.query(query_sql, True) + tdSql.checkRows(3) + tdSql.checkEqual(int(res[0][1]), 3) + tdSql.checkEqual(int(res[0][2]), 1) + tdSql.checkEqual(int(res[0][3]), 10) + tdSql.checkEqual(int(res[1][1]), 3) + tdSql.checkEqual(int(res[1][2]), 3) + tdSql.checkEqual(int(res[1][3]), 3) + tdSql.checkEqual(int(res[2][1]), 3) + tdSql.checkEqual(int(res[2][2]), 2) + tdSql.checkEqual(int(res[2][3]), 6) + + + # ! 
to confirm
+ ## select count avg sum from (condition_A or condition_B or condition_tag_C or condition_tag_D and like and in) where condition_A or condition_B or like and in interval
+ # query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where t1 = 3 and t1 = 2 or c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true) interval(8d)'
+ # res = tdSql.query(query_sql, True)
+ # tdSql.checkRows(3)
+ # tdSql.checkEqual(int(res[0][1]), 3)
+ # tdSql.checkEqual(int(res[0][2]), 1)
+ # tdSql.checkEqual(int(res[0][3]), 10)
+ # tdSql.checkEqual(int(res[1][1]), 3)
+ # tdSql.checkEqual(int(res[1][2]), 3)
+ # tdSql.checkEqual(int(res[1][3]), 3)
+ # tdSql.checkEqual(int(res[2][1]), 3)
+ # tdSql.checkEqual(int(res[2][2]), 2)
+ # tdSql.checkEqual(int(res[2][3]), 6)
+
+ ## select count avg sum from (condition_A and condition_B and like and in and ts and condition_tag_A and condition_tag_B and between) where condition_C or condition_D or condition_tag_C or condition_tag_D or like and in interval
+ query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >= 1 and c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00" and t1 < 2 and t1 > 0 and c6 between 0 and 7) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 2)
+ tdSql.checkEqual(int(res[0][2]), 1)
+ tdSql.checkEqual(int(res[0][3]), 2)
+ tdSql.checkEqual(int(res[1][1]), 1)
+ tdSql.checkEqual(int(res[1][2]), 1)
+ tdSql.checkEqual(int(res[1][3]), 1)
+
+ # ! to confirm
+ #select * from (select * from pyclqtwi where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00") where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) ;
+ #DB error: invalid operation: invalid expression (0.008747s)
+
 def checkTbColTypeOperator(self):
 '''
 Ordinary table full column type and operator
@@ -492,33 +545,13 @@ class TDTestCase:
 '''
 tb_name = self.initStb()
 self.queryMultiTb(tb_name)
-
-
- # tb_name1 = tdCom.getLongName(8, "letters")
- # tb_name2 = tdCom.getLongName(8, "letters")
- # tb_name3 = tdCom.getLongName(8, "letters")
- # tdSql.execute(
- # f"CREATE TABLE {tb_name1} (ts timestamp, c1 tinyint, c2 smallint, c3 int)")
- # tdSql.execute(
- # f"CREATE TABLE {tb_name2} (ts timestamp, c1 tinyint, c2 smallint, c3 int)")
- # tdSql.execute(
- # f"CREATE TABLE {tb_name3} (ts timestamp, c1 tinyint, c2 smallint, c3 int)")
- # insert_sql_list = [f'insert into {tb_name1} values ("2021-01-01 12:00:00", 1, 5, 1)',
- # f'insert into {tb_name1} values ("2021-01-03 12:00:00", 2, 4, 1)',
- # f'insert into {tb_name1} values ("2021-01-05 12:00:00", 3, 2, 1)',
- # f'insert into {tb_name2} values ("2021-01-01 12:00:00", 4, 2, 1)',
- # f'insert into {tb_name2} values ("2021-01-02 12:00:00", 5, 1, 1)',
- # f'insert into {tb_name2} values ("2021-01-04 12:00:00", 1, 2, 1)',
- # f'insert into {tb_name3} values ("2021-01-02 12:00:00", 4, 2, 1)',
- # f'insert into {tb_name3} values ("2021-01-06 12:00:00", 5, 1, 1)',
- # f'insert into {tb_name3} values ("2021-01-07 12:00:00", 1, 2, 1)',
- # ]
- # for sql in insert_sql_list:
- # tdSql.execute(sql)
- # tdSql.query(
- # f'select * from {tb_name1} t1, {tb_name2}, {tb_name3} t3 t2 where (t1.ts=t2.ts or t2.ts=t3.ts)')
- # tdSql.checkRows(4)
-
+
+ def checkMultiTbWithTag(self):
+ '''
+ test Multi tb with tag
+ '''
+ tb_name = self.initStb()
+ self.queryMultiTbWithTag(tb_name) def run(self): tdSql.prepare() @@ -534,7 +567,7 @@ class TDTestCase: self.checkStbPreCal() self.checkMultiTb() self.checkMultiStb() - + self.checkMultiTbWithTag() def stop(self): tdSql.close() From f8fdc010659bfc7b9f3ee39f2d9b634301a1e3d0 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Sun, 29 Aug 2021 20:53:41 +0800 Subject: [PATCH 06/71] tagregex: fix windows compilation error --- src/client/src/tscSQLParser.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d26843bb17..8d34c1d1fe 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -22,6 +22,7 @@ #include #include "os.h" +#include "regex.h" #include "qPlan.h" #include "qSqlparser.h" #include "qTableMeta.h" From 51849220bdc1e0164df4d31d1a673e0f28801e42 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 30 Aug 2021 02:54:33 +0800 Subject: [PATCH 07/71] [TD-6408]: fixed json error when timestamp was before Epoch --- src/plugins/http/src/httpJson.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index 3c72b795ee..b9067f0639 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -275,7 +275,7 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000; fractionLen = 5; format = ".%03" PRId64; - mod = t % 1000; + mod = ((t) % 1000 + 1000) % 1000; break; } @@ -283,7 +283,7 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000000; fractionLen = 8; format = ".%06" PRId64; - mod = t % 1000000; + mod = ((t) % 1000000 + 1000000) % 1000000; break; } @@ -291,7 +291,7 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000000000; fractionLen = 11; format = ".%09" PRId64; - mod = t % 1000000000; + mod = ((t) % 1000000000 + 1000000000) % 1000000000; break; } @@ -322,7 +322,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000; fractionLen = 5; format = ".%03" PRId64; - mod = t % 1000; + mod = ((t) % 1000 + 1000) % 1000; break; } @@ -330,7 +330,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000000; fractionLen = 8; format = ".%06" PRId64; - mod = t % 1000000; + mod = ((t) % 1000000 + 1000000) % 1000000; break; } @@ -338,7 +338,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { quot = t / 1000000000; fractionLen = 11; format = ".%09" PRId64; - mod = t % 1000000000; + mod = ((t) % 1000000000 + 1000000000) % 1000000000; break; } From 6912568b8af57942a6f67157e70facc95fae3289 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 30 Aug 2021 03:37:58 +0800 Subject: [PATCH 08/71] [TD-6408]: one second more than actual timestamp --- src/plugins/http/src/httpJson.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index b9067f0639..86e0f2f40b 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -272,26 +272,35 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { switch (timePrecision) { case TSDB_TIME_PRECISION_MILLI: { + mod = ((t) % 1000 + 1000) % 1000; + if (t < 0 && mod != 0) { + t -= 1000; + } quot = t / 1000; fractionLen = 5; format = ".%03" PRId64; - mod = ((t) % 1000 + 1000) % 1000; break; } case 
TSDB_TIME_PRECISION_MICRO: {
+ mod = ((t) % 1000000 + 1000000) % 1000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000;
+ }
 quot = t / 1000000;
 fractionLen = 8;
 format = ".%06" PRId64;
- mod = ((t) % 1000000 + 1000000) % 1000000;
 break;
 }
 case TSDB_TIME_PRECISION_NANO: {
+ mod = ((t) % 1000000000 + 1000000000) % 1000000000;
+ if (t < 0 && mod != 0) {
+ t -= 1000000000;
+ }
 quot = t / 1000000000;
 fractionLen = 11;
 format = ".%09" PRId64;
- mod = ((t) % 1000000000 + 1000000000) % 1000000000;
 break;
 }
From 28f2f0d70bd66dfa70f54e4b753ba428fca16b1e Mon Sep 17 00:00:00 2001
From: shenglian zhou
Date: Mon, 30 Aug 2021 09:24:13 +0800
Subject: [PATCH 09/71] schemaless: add regfree and change uerror to udebug
---
 src/client/src/tscSQLParser.c | 1 +
 src/util/src/tcompare.c | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 6100ec9b8d..fea02900fb 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -4531,6 +4531,7 @@ static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_
 tscError("Failed to compile regex pattern %s. reason %s", pattern, regErrBuf);
 return invalidOperationMsg(msgBuf, msg3);
 }
+ regfree(&regex);
 }
 return TSDB_CODE_SUCCESS;
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 0af7bb6fb3..e906eb8423 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -378,7 +378,7 @@ int32_t compareStrRegexComp(const void* pLeft, const void* pRight) {
 errCode = regexec(&regex, str, 0, NULL, 0);
 if (errCode != 0 && errCode != REG_NOMATCH) {
 regerror(errCode, &regex, msgbuf, sizeof(msgbuf));
- uError("Failed to match %s with pattern %s, reason %s", str, pattern, msgbuf)
+ uDebug("Failed to match %s with pattern %s, reason %s", str, pattern, msgbuf)
 }
 int32_t result = (errCode == 0) ? 0 : 1;
 regfree(&regex);
From 4f11fde0a1d221684936648860f9dde314b0b834 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Mon, 30 Aug 2021 10:24:42 +0800
Subject: [PATCH 10/71] [TS-157] : update description about func "INTERP".
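A note on the TD-6408 changes above: the added `mod = ((t) % N + N) % N` together with the conditional `t -= N` implements floored division, so a pre-Epoch timestamp splits into a non-negative fraction and a quotient rounded toward minus infinity (C99 `/` and `%` truncate toward zero). A minimal standalone sketch of the millisecond case follows; `splitMs` is an illustrative helper, not a function from the patch:

#include <stdio.h>
#include <inttypes.h>

/* Same arithmetic as httpJsonTimestamp for TSDB_TIME_PRECISION_MILLI. */
static void splitMs(int64_t t, int64_t *quot, int64_t *mod) {
  *mod = (t % 1000 + 1000) % 1000;   /* fraction forced into [0, 1000) */
  if (t < 0 && *mod != 0) {
    t -= 1000;                       /* push the quotient toward -infinity */
  }
  *quot = t / 1000;                  /* C99 '/' truncates toward zero */
}

int main(void) {
  int64_t q, m;
  splitMs(-1, &q, &m);               /* 1 ms before the Epoch */
  printf("quot=%" PRId64 " mod=%" PRId64 "\n", q, m); /* quot=-1 mod=999 */
  return 0;                          /* i.e. 1969-12-31 23:59:59.999 */
}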
--- documentation20/cn/12.taos-sql/docs.md | 37 ++++++++++++++++---------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index b183b6e419..88712cc746 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1220,27 +1220,36 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ``` 功能说明:返回表/超级表的指定时间截面、指定字段的记录。 - 返回结果数据类型:同应用的字段。 + 返回结果数据类型:同字段类型。 - 应用字段:所有字段。 + 应用字段:数值型字段。 适用于:**表、超级表**。 - 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。其中,条件语句里面可以附带更多的筛选条件,例如标签、tbname。 + 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。此外,条件语句里面可附带筛选条件,例如标签、tbname。 + + INTERP 查询要求查询的时间区间必须位于数据集合(表)的所有记录的时间范围之内。如果给定的时间戳位于时间范围之外,即使有插值指令,仍然不返回结果。 示例: - ```mysql - taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev); - interp(ts) | interp(f1) | interp(f2) | interp(f3) | - ==================================================================== - 2017-07-14 10:42:00.005 | 5 | 9 | 6 | - Query OK, 1 row(s) in set (0.002912s) + ```sql + taos> SELECT INTERP(*) FROM meters WHERE ts='2017-7-14 18:40:00.004'; + interp(ts) | interp(current) | interp(voltage) | interp(phase) | + ========================================================================================== + 2017-07-14 18:40:00.004 | 9.84020 | 216 | 0.32222 | + Query OK, 1 row(s) in set (0.002652s) + ``` - taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev); - interp(ts) | interp(f1) | interp(f2) | interp(f3) | - ==================================================================== - 2017-07-14 10:42:00.005 | 5 | 6 | 7 | - Query OK, 1 row(s) in set (0.002005s) + 如果给定的时间戳无对应的数据,在不指定插值生成策略的情况下,不会返回结果,如果指定了插值策略,会根据插值策略返回结果。 + + ```sql + taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005'; + Query OK, 0 row(s) in set (0.004022s) + + taos> SELECT INTERP(*) FROM meters WHERE tbname IN ('d636') AND ts='2017-7-14 18:40:00.005' FILL(PREV);; + interp(ts) | interp(current) | interp(voltage) | interp(phase) | + ========================================================================================== + 2017-07-14 18:40:00.005 | 9.88150 | 217 | 0.32500 | + Query OK, 1 row(s) in set (0.003056s) ``` ### 计算函数 From 2a28f3d744c90eaacfa4095bd4e5c9cc7eb27645 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 30 Aug 2021 10:51:25 +0800 Subject: [PATCH 11/71] add sqlt & sqlutc test script[ci skip] --- tests/script/http/httpTestSqlUtc.c | 128 +++++++++++++++++++++++++++++ tests/script/http/httpTestSqlt.c | 128 +++++++++++++++++++++++++++++ tests/script/http/makefile | 9 +- 3 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 tests/script/http/httpTestSqlUtc.c create mode 100644 tests/script/http/httpTestSqlt.c diff --git a/tests/script/http/httpTestSqlUtc.c b/tests/script/http/httpTestSqlUtc.c new file mode 100644 index 0000000000..8a88d1f285 --- /dev/null +++ b/tests/script/http/httpTestSqlUtc.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAXLINE 1024 + +typedef struct { + pthread_t pid; + int threadId; + int rows; + int tables; +} ThreadObj; + +void post(char *ip,int port,char *page,char *msg) { + int sockfd,n; + char recvline[MAXLINE]; + struct sockaddr_in servaddr; + char content[4096]; + char content_page[50]; + sprintf(content_page,"POST /%s HTTP/1.1\r\n",page); + char content_host[50]; + 
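/* post() assembles a raw HTTP/1.1 POST by hand: request line, Host and
+ * Content-Type headers, a Basic-Auth header (the base64 of the default
+ * root:taosdata credentials), Content-Length, then the SQL text as body. */
+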
sprintf(content_host,"HOST: %s:%d\r\n",ip,port); + char content_type[] = "Content-Type: text/plain\r\n"; + char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + char content_len[50]; + sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg)); + sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg); + if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) { + printf("socket error\n"); + } + bzero(&servaddr,sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(port); + if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) { + printf("inet_pton error\n"); + } + if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) { + printf("connect error\n"); + } + write(sockfd,content,strlen(content)); + printf("%s\n", content); + while((n = read(sockfd,recvline,MAXLINE)) > 0) { + recvline[n] = 0; + if(fputs(recvline,stdout) == EOF) { + printf("fputs error\n"); + } + } + if(n < 0) { + printf("read error\n"); + } +} + +void singleThread() { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sqlutc"; + char page1[] = "rest/sqlutc/db1"; + char page2[] = "rest/sqlutc/db2"; + char nonexit[] = "rest/sqlutc/xxdb"; + + post(ip,port,page,"drop database if exists db1"); + post(ip,port,page,"create database if not exists db1"); + post(ip,port,page,"drop database if exists db2"); + post(ip,port,page,"create database if not exists db2"); + post(ip,port,page1,"create table t11 (ts timestamp, c1 int)"); + post(ip,port,page2,"create table t21 (ts timestamp, c1 int)"); + post(ip,port,page1,"insert into t11 values (now, 1)"); + post(ip,port,page2,"insert into t21 values (now, 2)"); + post(ip,port,nonexit,"create database if not exists db3"); +} + +void execute(void *params) { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sqlutc"; + char *unique = calloc(1, 1024); + char *sql = calloc(1, 1024); + ThreadObj *pThread = (ThreadObj *)params; + printf("Thread %d started\n", pThread->threadId); + sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(sql, "drop database if exists db%d", pThread->threadId); + post(ip,port,page, sql); + sprintf(sql, "create database if not exists db%d", pThread->threadId); + post(ip,port,page, sql); + for (int i = 0; i < pThread->tables; i++) { + sprintf(sql, "create table t%d (ts timestamp, c1 int)", i); + post(ip,port,unique, sql); + } + for (int i = 0; i < pThread->rows; i++) { + sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId); + post(ip,port,unique, sql); + } + free(unique); + free(sql); + return; +} + +void multiThread() { + int numOfThreads = 100; + int numOfTables = 100; + int numOfRows = 1; + ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj)); + for (int i = 0; i < numOfThreads; i++) { + ThreadObj *pthread = threads + i; + pthread_attr_t thattr; + pthread->threadId = i + 1; + pthread->rows = numOfRows; + pthread->tables = numOfTables; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread); + } + for (int i = 0; i < numOfThreads; i++) { + pthread_join(threads[i].pid, NULL); + } + free(threads); +} + +int main() { + singleThread(); + multiThread(); + exit(0); +} \ No newline at end of file diff --git a/tests/script/http/httpTestSqlt.c b/tests/script/http/httpTestSqlt.c new file mode 100644 index 0000000000..82885a4a2d --- /dev/null +++ b/tests/script/http/httpTestSqlt.c @@ -0,0 
+1,128 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAXLINE 1024 + +typedef struct { + pthread_t pid; + int threadId; + int rows; + int tables; +} ThreadObj; + +void post(char *ip,int port,char *page,char *msg) { + int sockfd,n; + char recvline[MAXLINE]; + struct sockaddr_in servaddr; + char content[4096]; + char content_page[50]; + sprintf(content_page,"POST /%s HTTP/1.1\r\n",page); + char content_host[50]; + sprintf(content_host,"HOST: %s:%d\r\n",ip,port); + char content_type[] = "Content-Type: text/plain\r\n"; + char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + char content_len[50]; + sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg)); + sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg); + if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) { + printf("socket error\n"); + } + bzero(&servaddr,sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(port); + if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) { + printf("inet_pton error\n"); + } + if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) { + printf("connect error\n"); + } + write(sockfd,content,strlen(content)); + printf("%s\n", content); + while((n = read(sockfd,recvline,MAXLINE)) > 0) { + recvline[n] = 0; + if(fputs(recvline,stdout) == EOF) { + printf("fputs error\n"); + } + } + if(n < 0) { + printf("read error\n"); + } +} + +void singleThread() { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sqlt"; + char page1[] = "rest/sqlt/db1"; + char page2[] = "rest/sqlt/db2"; + char nonexit[] = "rest/sqlt/xxdb"; + + post(ip,port,page,"drop database if exists db1"); + post(ip,port,page,"create database if not exists db1"); + post(ip,port,page,"drop database if exists db2"); + post(ip,port,page,"create database if not exists db2"); + post(ip,port,page1,"create table t11 (ts timestamp, c1 int)"); + post(ip,port,page2,"create table t21 (ts timestamp, c1 int)"); + post(ip,port,page1,"insert into t11 values (now, 1)"); + post(ip,port,page2,"insert into t21 values (now, 2)"); + post(ip,port,nonexit,"create database if not exists db3"); +} + +void execute(void *params) { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sqlt"; + char *unique = calloc(1, 1024); + char *sql = calloc(1, 1024); + ThreadObj *pThread = (ThreadObj *)params; + printf("Thread %d started\n", pThread->threadId); + sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(sql, "drop database if exists db%d", pThread->threadId); + post(ip,port,page, sql); + sprintf(sql, "create database if not exists db%d", pThread->threadId); + post(ip,port,page, sql); + for (int i = 0; i < pThread->tables; i++) { + sprintf(sql, "create table t%d (ts timestamp, c1 int)", i); + post(ip,port,unique, sql); + } + for (int i = 0; i < pThread->rows; i++) { + sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId); + post(ip,port,unique, sql); + } + free(unique); + free(sql); + return; +} + +void multiThread() { + int numOfThreads = 100; + int numOfTables = 100; + int numOfRows = 1; + ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj)); + for (int i = 0; i < numOfThreads; i++) { + ThreadObj *pthread = threads + i; + pthread_attr_t thattr; + pthread->threadId = i + 1; + pthread->rows = numOfRows; + pthread->tables = numOfTables; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + 
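/* execute() is declared with a void return, so it is cast below to the
+ * void *(*)(void *) signature pthread_create expects; a conforming worker
+ * would be declared void *execute(void *) and end with return NULL;. */
+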
pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread); + } + for (int i = 0; i < numOfThreads; i++) { + pthread_join(threads[i].pid, NULL); + } + free(threads); +} + +int main() { + singleThread(); + multiThread(); + exit(0); +} \ No newline at end of file diff --git a/tests/script/http/makefile b/tests/script/http/makefile index d1be683eda..50886cf6b1 100644 --- a/tests/script/http/makefile +++ b/tests/script/http/makefile @@ -1,2 +1,9 @@ all: - gcc -g httpTest.c -o httpTest -lpthread \ No newline at end of file + gcc -g httpTest.c -o httpTest -lpthread + gcc -g httpTestSqlt.c -o httpTestSqlt -lpthread + gcc -g httpTestSqlUtc.c -o httpTestSqlUtc -lpthread + +clean: + rm httpTest + rm httpTestSqlt + rm httpTestSqlUtc \ No newline at end of file From 178b810656cf527f97e157b5ed8d3e1146b20b83 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 11:00:05 +0800 Subject: [PATCH 12/71] [TS-20] : support underscore escapes in LIKE matching wildcard. --- documentation20/cn/12.taos-sql/docs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 88712cc746..9552a8fb2c 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -722,6 +722,7 @@ Query OK, 1 row(s) in set (0.001091s) 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. like 算子使用通配符字符串进行匹配检查。 * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。 + * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.1.8.0 版本开始支持) * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) 3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 From 43fab7f6f772ac4368d8776611004398d1a93e52 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 30 Aug 2021 11:09:11 +0800 Subject: [PATCH 13/71] fix test script --- tests/script/http/httpTestSqlUtc.c | 2 +- tests/script/http/httpTestSqlt.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/script/http/httpTestSqlUtc.c b/tests/script/http/httpTestSqlUtc.c index 8a88d1f285..643c884a1a 100644 --- a/tests/script/http/httpTestSqlUtc.c +++ b/tests/script/http/httpTestSqlUtc.c @@ -82,7 +82,7 @@ void execute(void *params) { char *sql = calloc(1, 1024); ThreadObj *pThread = (ThreadObj *)params; printf("Thread %d started\n", pThread->threadId); - sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(unique, "rest/sqlutc/db%d",pThread->threadId); sprintf(sql, "drop database if exists db%d", pThread->threadId); post(ip,port,page, sql); sprintf(sql, "create database if not exists db%d", pThread->threadId); diff --git a/tests/script/http/httpTestSqlt.c b/tests/script/http/httpTestSqlt.c index 82885a4a2d..2eaaee0f99 100644 --- a/tests/script/http/httpTestSqlt.c +++ b/tests/script/http/httpTestSqlt.c @@ -82,7 +82,7 @@ void execute(void *params) { char *sql = calloc(1, 1024); ThreadObj *pThread = (ThreadObj *)params; printf("Thread %d started\n", pThread->threadId); - sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(unique, "rest/sqlt/db%d",pThread->threadId); sprintf(sql, "drop database if exists db%d", pThread->threadId); post(ip,port,page, sql); sprintf(sql, "create database if not exists db%d", pThread->threadId); From e72a6cc1ee5b151c146fea84481f295fccec19c3 Mon Sep 17 00:00:00 2001 
From: Shuduo Sang Date: Mon, 30 Aug 2021 11:27:13 +0800 Subject: [PATCH 14/71] change mac default path back after homebrew install support merged. (#7688) --- packaging/tools/make_install.sh | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 07f1d61fad..d146091f0a 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -20,20 +20,19 @@ fi # Dynamic directory -data_dir="/var/lib/taos" if [ "$osType" != "Darwin" ]; then + data_dir="/var/lib/taos" log_dir="/var/log/taos" else - log_dir=~/TDengine/log + data_dir="/var/local/lib/taos" + log_dir="/var/local/log/taos" fi -data_link_dir="/usr/local/taos/data" -log_link_dir="/usr/local/taos/log" if [ "$osType" != "Darwin" ]; then cfg_install_dir="/etc/taos" else - cfg_install_dir="/usr/local/Cellar/tdengine/${verNumber}/taos" + cfg_install_dir="/usr/local/etc/taos" fi if [ "$osType" != "Darwin" ]; then @@ -41,6 +40,10 @@ if [ "$osType" != "Darwin" ]; then lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" fi #install main path @@ -144,12 +147,13 @@ function install_main_path() { function install_bin() { # Remove links + ${csudo} rm -f ${bin_link_dir}/taos || : + ${csudo} rm -f ${bin_link_dir}/taosd || : + ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/taosdump || : + if [ "$osType" != "Darwin" ]; then - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/perfMonitor || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : fi @@ -167,11 +171,12 @@ function install_bin() { ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link + [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + if [ "$osType" != "Darwin" ]; then - [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : fi From 512e64fdf5436c01becd3e0b292d513039b14450 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 14:21:01 +0800 Subject: [PATCH 15/71] [TD-2639] : fix evaluation link in English doc index. 
--- documentation20/en/00.index/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index 1672c70b3c..0ac4a06ef4 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -6,7 +6,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series * [TDengine Introduction and Features](/evaluation#intro) * [TDengine Use Scenes](/evaluation#scenes) -* [TDengine Performance Metrics and Verification]((/evaluation#)) +* [TDengine Performance Metrics and Verification](/evaluation#) ## [Getting Started](/getting-started) From 174364c6a90c7ecdb770f381ddcc53a654cf9afa Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 30 Aug 2021 14:24:26 +0800 Subject: [PATCH 16/71] Hotfix/sangshuduo/td 2936 change mac default path (#7694) * change mac default path back after homebrew install support merged. * fix lib/log path install for mac --- packaging/tools/make_install.sh | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index d146091f0a..0849a76e31 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -25,8 +25,8 @@ if [ "$osType" != "Darwin" ]; then data_dir="/var/lib/taos" log_dir="/var/log/taos" else - data_dir="/var/local/lib/taos" - log_dir="/var/local/log/taos" + data_dir="/usr/local/var/lib/taos" + log_dir="/usr/local/var/log/taos" fi if [ "$osType" != "Darwin" ]; then @@ -293,18 +293,14 @@ function install_config() { } function install_log() { - if [ "$osType" != "Darwin" ]; then - ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - ${csudo} ln -s ${log_dir} ${install_main_dir}/log - fi + ${csudo} rm -rf ${log_dir} || : + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } function install_data() { - if [ "$osType" != "Darwin" ]; then - ${csudo} mkdir -p ${data_dir} - ${csudo} ln -s ${data_dir} ${install_main_dir}/data - fi + ${csudo} mkdir -p ${data_dir} + ${csudo} ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { @@ -501,10 +497,7 @@ function install_TDengine() { install_main_path - if [ "$osType" != "Darwin" ]; then - install_data - fi - + install_data install_log install_header install_lib From ea811da8a8cdc9c6e3df64d16dbcfbfb663ae427 Mon Sep 17 00:00:00 2001 From: kartist <31991579+kartist@users.noreply.github.com> Date: Mon, 30 Aug 2021 14:25:48 +0800 Subject: [PATCH 17/71] doc: 7652 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit update '降频操作' to '降采样' --- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 9552a8fb2c..9981f1b7f8 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -35,7 +35,7 @@ taos> DESCRIBE meters; - 内部函数 now 是客户端的当前时间 - 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 - Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度的逻辑也是类似的。) -- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= 
now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 +- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒和纳秒。(从 2.1.5.0 版本开始支持纳秒精度) From 6731ab763140c4bdb94afc2d0e8b65a1cc3e71f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=9A=AE=E8=9B=8B=E5=95=8A=E7=9A=AE=E8=9B=8B?= <53462360+lovetaitai@users.noreply.github.com> Date: Mon, 30 Aug 2021 14:47:30 +0800 Subject: [PATCH 18/71] : add the missing note --- documentation20/en/04.model/docs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md index 1bf6c67878..28f1c7b791 100644 --- a/documentation20/en/04.model/docs.md +++ b/documentation20/en/04.model/docs.md @@ -26,6 +26,7 @@ Replace the database operating in the current connection with “power”, other - Any table or STable belongs to a database. Before creating a table, a database must be created first. - Tables in two different databases cannot be JOIN. +- You need to specify a timestamp when creating and inserting records and querying history records. ## Create a STable From 36fd0df1bfac8fa853c0e25f87539843276473e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=82=E6=B0=91233?= Date: Mon, 30 Aug 2021 14:52:14 +0800 Subject: [PATCH 19/71] doc: update 10.cluster docs.md --- documentation20/cn/10.cluster/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index f995597db0..757ad9f2c0 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -14,7 +14,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 **注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`); -**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。 +**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是修改 `/etc/hosts` 文件。 **第二步**:建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口; From 0bdf053e1baeab70756185a932ba74314d071d3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=82=E6=B0=91233?= Date: Mon, 30 Aug 2021 14:55:24 +0800 Subject: [PATCH 20/71] doc: update 10.cluster/docs.md --- documentation20/en/10.cluster/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md index d7d908ff42..f65c683483 100644 --- a/documentation20/en/10.cluster/docs.md +++ b/documentation20/en/10.cluster/docs.md @@ -16,7 +16,7 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19 **Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`)on the premise of ensuring that the data is useless or backed up; -**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or Host file. 
+**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or modify `/etc/hosts` file. **Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that the TCP and UDP ports of ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built; From 79fecce878d0a6d188d39d7f1b36ff1feb647298 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 15:56:01 +0800 Subject: [PATCH 21/71] Use hosts instead of specific file paths for compatibility with more operating systems. --- documentation20/cn/10.cluster/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index 757ad9f2c0..f39138d61d 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -14,7 +14,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。 **注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`); -**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是修改 `/etc/hosts` 文件。 +**注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是修改 hosts 文件。 **第二步**:建议关闭所有物理节点的防火墙,至少保证端口:6030 - 6042的TCP和UDP端口都是开放的。**强烈建议**先关闭防火墙,集群搭建完毕之后,再来配置端口; From 4c36f00a052160d1003f2095501ebfd00af1d016 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 15:57:04 +0800 Subject: [PATCH 22/71] Use hosts instead of specific file paths for compatibility with more operating systems. --- documentation20/en/10.cluster/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md index f65c683483..05d0a463aa 100644 --- a/documentation20/en/10.cluster/docs.md +++ b/documentation20/en/10.cluster/docs.md @@ -16,7 +16,7 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19 **Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`)on the premise of ensuring that the data is useless or backed up; -**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or modify `/etc/hosts` file. +**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or modify hosts file. **Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that the TCP and UDP ports of ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built; From 0d7d9f12932b790604b99f543a434989fca7d46b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 16:08:14 +0800 Subject: [PATCH 23/71] Improve expression slightly. 
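The cluster-doc patches above all refine the same requirement: every client must be able to resolve each dnode's FQDN, whether through DNS or the hosts file. A quick stand-alone check of that prerequisite, as a C sketch (the FQDN `h1.taosdata.com` is a placeholder; 6030 is the first of the 6030-6042 ports Step 2 asks to open):

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netdb.h>

/* Sketch: confirm a dnode FQDN resolves from this client machine,
 * which is what Note 2 requires before joining or querying a cluster. */
int main(void) {
    const char *fqdn = "h1.taosdata.com";   /* placeholder dnode FQDN */
    struct addrinfo hints, *res = NULL;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family   = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    int err = getaddrinfo(fqdn, "6030", &hints, &res);
    if (err != 0) {
        fprintf(stderr, "cannot resolve %s: %s\n", fqdn, gai_strerror(err));
        return 1;
    }
    printf("%s resolves; client-side FQDN setup looks usable\n", fqdn);
    freeaddrinfo(res);
    return 0;
}
```

If this fails, fix DNS or the hosts entry before touching TDengine configuration.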
--- documentation20/en/04.model/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md index 28f1c7b791..08d952d317 100644 --- a/documentation20/en/04.model/docs.md +++ b/documentation20/en/04.model/docs.md @@ -26,7 +26,7 @@ Replace the database operating in the current connection with “power”, other - Any table or STable belongs to a database. Before creating a table, a database must be created first. - Tables in two different databases cannot be JOIN. -- You need to specify a timestamp when creating and inserting records and querying history records. +- You need to specify a timestamp when creating and inserting records, or querying history records. ## Create a STable From e639cac5224d6e6798747927e4fd63a87d53a4a6 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 16:12:20 +0800 Subject: [PATCH 24/71] [TD-2639] : remove dead link in English doc. --- documentation20/en/03.architecture/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index ce8dd6c8be..be3e711d9a 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -139,7 +139,7 @@ TDengine suggests using collection point ID as the table name (like D1001 in the ### STable: A Collection of Data Points in the Same Type -The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the [STable(Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine. +The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine. STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established. From e119a533071470d74a6c0735df9a5fff8c7b1422 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 30 Aug 2021 16:12:30 +0800 Subject: [PATCH 25/71] [td-255] code refactor. 
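The STable description kept by the doc patch above (one super table per data-point type, one child table per collection point, static attributes carried as tags) is easy to make concrete with the C client. A hypothetical sketch, with the schema and tag values borrowed from the surrounding docs and `localhost` credentials assumed:

```c
#include <stdio.h>
#include <taos.h>   /* TDengine C client; link with -ltaos */

/* Run one SQL statement and report any error. */
static int exec_sql(TAOS *taos, const char *sql) {
    TAOS_RES *res = taos_query(taos, sql);
    int code = taos_errno(res);
    if (code != 0) fprintf(stderr, "%s -> %s\n", sql, taos_errstr(res));
    taos_free_result(res);
    return code;
}

int main(void) {
    TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (taos == NULL) return 1;

    exec_sql(taos, "CREATE DATABASE IF NOT EXISTS power");
    /* One STable describes the whole type: metrics plus tag schema. */
    exec_sql(taos, "CREATE TABLE IF NOT EXISTS power.meters"
                   " (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)"
                   " TAGS (location BINARY(64), groupId INT)");
    /* One child table per concrete collection point, tags fixed at creation. */
    exec_sql(taos, "CREATE TABLE IF NOT EXISTS power.d1001"
                   " USING power.meters TAGS ('Beijing.Chaoyang', 2)");

    taos_close(taos);
    return 0;
}
```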
--- src/query/src/qExecutor.c | 87 +++++++++------------------------------ 1 file changed, 19 insertions(+), 68 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 700cf17fd3..8fefed51c8 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6388,6 +6388,19 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { return pInfo->binfo.pRes; } +static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo *pInfo, SQueryRuntimeEnv* pRuntimeEnv, bool* newgroup) { + pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; + int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; + taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); + + taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); + taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); + + doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); + pInfo->existNewGroupBlock = NULL; + *newgroup = true; +} + static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) { if (taosFillHasMoreResults(pInfo->pFillInfo)) { *newgroup = false; @@ -6399,16 +6412,7 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRunt // handle the cached new group data block if (pInfo->existNewGroupBlock) { - pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; - int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; - taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); - - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); - - doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); - pInfo->existNewGroupBlock = NULL; - *newgroup = true; + doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup); } } @@ -6427,26 +6431,6 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) { return pInfo->pRes; } -// if (taosFillHasMoreResults(pInfo->pFillInfo)) { -// *newgroup = false; -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity); -// return pInfo->pRes; -// } -// -// // handle the cached new group data block -// if (pInfo->existNewGroupBlock) { -// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; -// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; -// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); -// -// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); -// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); -// -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); -// pInfo->existNewGroupBlock = NULL; -// *newgroup = true; -// return (pInfo->pRes->info.rows > 0)? 
pInfo->pRes:NULL; -// } while(1) { publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); @@ -6493,46 +6477,13 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) { return pInfo->pRes; } - -// if (taosFillHasMoreResults(pInfo->pFillInfo)) { -// *newgroup = false; -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity); -// return pInfo->pRes; -// } -// -// // handle the cached new group data block -// if (pInfo->existNewGroupBlock) { -// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; -// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey; -// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); -// -// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); -// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); -// -// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity); -// pInfo->existNewGroupBlock = NULL; -// *newgroup = true; -// -// if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) { -// return pInfo->pRes; -// } -// -//// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; -// } - } else if (pInfo->existNewGroupBlock) { // try next group - pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows; - int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey; - taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start); + assert(pBlock != NULL); + doHandleRemainBlockForNewGroupImpl(pInfo, pRuntimeEnv, newgroup); - taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey); - taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock); - - doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p); - pInfo->existNewGroupBlock = NULL; - *newgroup = true; - - return (pInfo->pRes->info.rows > 0) ? pInfo->pRes : NULL; + if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) { + return pInfo->pRes; + } } else { return NULL; } From 41542f5ea0cdc1b5de04538bf9e94c2463b8e238 Mon Sep 17 00:00:00 2001 From: Jimmy Dai <85916182+StonycatDAI@users.noreply.github.com> Date: Mon, 30 Aug 2021 16:43:35 +0800 Subject: [PATCH 26/71] Update docs.md modify and review the English docs --- documentation20/en/02.getting-started/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index 7b7de202b7..eefb3052fc 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -2,7 +2,7 @@ ## Quick Install -TDengine software consists of 3 parts: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. 
You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). +TDengine software consists of 3 components: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package). ### Install from Source From ea5c0128991884504035927da7331cb963395eb5 Mon Sep 17 00:00:00 2001 From: FandyWu <88819543+FandyWu@users.noreply.github.com> Date: Mon, 30 Aug 2021 16:59:15 +0800 Subject: [PATCH 27/71] Update docs.md --- documentation20/en/03.architecture/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md index be3e711d9a..ac6c94fe40 100644 --- a/documentation20/en/03.architecture/docs.md +++ b/documentation20/en/03.architecture/docs.md @@ -119,7 +119,7 @@ As the data points are a series of data points over time, the data points genera 9. in addition to storage and query operations, various statistical and real-time calculation operations are also required; 10. data volume is huge, a system may generate over 10 billion data points in a day. -By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency. +In light of the characteristics mentioned above, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency. ### Relational Database Model From a5fb19ec9f47abcfb5b380f6868fb9074321b5ea Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 30 Aug 2021 17:23:39 +0800 Subject: [PATCH 28/71] [TD-6424] : add English version of Docker guide. --- .../en/02.getting-started/01.docker/docs.md | 243 ++++++++++++++++++ documentation20/en/02.getting-started/docs.md | 4 +- 2 files changed, 246 insertions(+), 1 deletion(-) create mode 100644 documentation20/en/02.getting-started/01.docker/docs.md diff --git a/documentation20/en/02.getting-started/01.docker/docs.md b/documentation20/en/02.getting-started/01.docker/docs.md new file mode 100644 index 0000000000..daa89ef101 --- /dev/null +++ b/documentation20/en/02.getting-started/01.docker/docs.md @@ -0,0 +1,243 @@ +# Quickly experience TDengine through Docker + +While it is not recommended to deploy TDengine services via Docker in a production environment, Docker tools do a good job of shielding the environmental differences in the underlying operating system and are well suited for use in development testing or first-time experience with the toolset for installing and running TDengine. In particular, Docker makes it relatively easy to try TDengine on Mac OSX and Windows systems without having to install a virtual machine or rent an additional Linux server. 
In addition, starting from version 2.0.14.0, TDengine provides images that support both X86-64, X86, arm64, and arm32 platforms, so non-mainstream computers that can run docker, such as NAS, Raspberry Pi, and embedded development boards, can also easily experience TDengine based on this document. + +The following article explains how to quickly build a single-node TDengine runtime environment via Docker to support development and testing through a Step by Step style introduction. + +## Docker download + +The Docker tools themselves can be downloaded from [Docker official site](https://docs.docker.com/get-docker/). + +After installation, you can check the Docker version in the command line terminal. If the version number is output properly, the Docker environment has been installed successfully. + +```bash +$ docker -v +Docker version 20.10.3, build 48d30b5 +``` + +## Running TDengine in a Docker container + +1, Use the command to pull the TDengine image and make it run in the background. + +```bash +$ docker run -d --name tdengine tdengine/tdengine +7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292 +``` + +- **docker run**: Running a container via Docker +- **--name tdengine**: Set the container name, we can see the corresponding container by the container name +- **-d**: Keeping containers running in the background +- **tdengine/tdengine**: Pulled from the official TDengine published application image +- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**: The long character returned is the container ID, and we can also view the corresponding container by its container ID + +2, Verify that the container is running correctly. + +```bash +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS ··· +c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +``` + +- **docker ps**: Lists information about all containers that are in running state. +- **CONTAINER ID**: Container ID. +- **IMAGE**: The mirror used. +- **COMMAND**: The command to run when starting the container. +- **CREATED**: The time when the container was created. +- **STATUS**: The container status. Up means running. + +3, Go inside the Docker container and use TDengine. + +```bash +$ docker exec -it tdengine /bin/bash +root@c452519b0f9b:~/TDengine-server-2.0.20.13# +``` + +- **docker exec**: Enter the container via the docker exec command; if you exit, the container will not stop. +- **-i**: Enter the interactive mode. +- **-t**: Specify a terminal. +- **c452519b0f9b**: The container ID, which needs to be modified according to the value returned by the docker ps command. +- **/bin/bash**: Load the container and run bash to interact with it. + +4, After entering the container, execute the taos shell client program. + +```bash +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos + +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +The TDengine terminal successfully connects to the server and prints out a welcome message and version information. If it fails, an error message is printed. + +In the TDengine terminal, you can create/delete databases, tables, super tables, etc., and perform insert and query operations via SQL commands. For details, you can refer to [TAOS SQL guide](https://www.taosdata.com/en/documentation/taos-sql). + +## Learn more about TDengine with taosdemo + +1, Following the above steps, exit the TDengine terminal program first. 
+ +```bash +$ taos> q +root@c452519b0f9b:~/TDengine-server-2.0.20.13# +``` + +2, Execute taosdemo from the command line interface. + +```bash +root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo + +taosdemo is simulating data generated by power equipments monitoring... + +host: 127.0.0.1:6030 +user: root +password: taosdata +configDir: +resultFile: ./output.txt +thread num of insert data: 10 +thread num of create table: 10 +top insert interval: 0 +number of records per req: 30000 +max sql length: 1048576 +database count: 1 +database[0]: + database[0] name: test + drop: yes + replica: 1 + precision: ms + super table count: 1 + super table[0]: + stbName: meters + autoCreateTable: no + childTblExists: no + childTblCount: 10000 + childTblPrefix: d + dataSource: rand + iface: taosc + insertRows: 10000 + interlaceRows: 0 + disorderRange: 1000 + disorderRatio: 0 + maxSqlLen: 1048576 + timeStampStep: 1 + startTimestamp: 2017-07-14 10:40:00.000 + sampleFormat: + sampleFile: + tagsFile: + columnCount: 3 +column[0]:FLOAT column[1]:INT column[2]:FLOAT + tagCount: 2 + tag[0]:INT tag[1]:BINARY(16) + + Press enter key to continue or Ctrl-C to stop +``` + +After enter, this command will automatically create a super table meters under the database test, there are 10,000 tables under this super table, the table name is "d0" to "d9999", each table has 10,000 records, each record has four fields (ts, current, voltage, phase), the time stamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999", each table has a tag location and groupId, groupId is set from 1 to 10 and location is set to "beijing" or "shanghai". + +It takes about a few minutes to execute this command and ends up inserting a total of 100 million records. + +3, Go to the TDengine terminal and view the data generated by taosdemo. + +- **Go to the terminal interface.** + +```bash +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos + +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +- **View the database.** + +```bash +$ taos> show databases; + name | created_time | ntables | vgroups | ··· + test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· + log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· + +``` + +- **View Super Tables.** + +```bash +$ taos> use test; +Database changed. 
+ +$ taos> show stables; + name | created_time | columns | tags | tables | +============================================================================================ + meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | +Query OK, 1 row(s) in set (0.003259s) + +``` + +- **View the table and limit the output to 10 entries.** + +```bash +$ taos> select * from test.t0 limit 10; + +DB error: Table does not exist (0.002857s) +taos> select * from test.d0 limit 10; + ts | current | voltage | phase | +====================================================================================== + 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | + 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | + 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | + 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | + 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | + 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | + 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | + 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | + 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | + 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | +Query OK, 10 row(s) in set (0.016791s) + +``` + +- **View the tag values for the d0 table.** + +```bash +$ taos> select groupid, location from test.d0; + groupid | location | +================================= + 0 | shanghai | +Query OK, 1 row(s) in set (0.003490s) + +``` + +## Stop the TDengine service that is running in Docker + +```bash +$ docker stop tdengine +tdengine +``` + +- **docker stop**: Stop the specified running docker image with docker stop. +- **tdengine**: The name of the container. + +## TDengine connected in Docker during programming development + +There are two ideas for connecting from outside of Docker to use TDengine services running inside a Docker container: + +1, By port mapping (-p), the open network port inside the container is mapped to the specified port of the host. By mounting the local directory (-v), you can synchronize the data inside the host and the container to prevent data loss after the container is deleted. + +```bash +$ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine +526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd + +$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} +``` + +- The first command starts a docker container with TDengine running and maps the 6041 port of the container to port 6041 of the host. +- The second command, accessing TDengine through the RESTful interface, connects to port 6041 on the local machine, so the connection is successful. + +Note: In this example, for convenience reasons, only port 6041 is mapped, which is required for RESTful. 
If you wish to connect to the TDengine service in a non-RESTful manner, you will need to map a total of 11 ports starting at 6030. In the example, mounting the local directory also only deals with the /etc/taos directory where the configuration files are located, but not the data storage directory. + +2, Go directly to the docker container to do development via the exec command. That is, put the program code in the same Docker container where the TDengine server is located and connect to the TDengine service local to the container. + +```bash +$ docker exec -it tdengine /bin/bash +``` diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md index 7b7de202b7..f506ece34a 100644 --- a/documentation20/en/02.getting-started/docs.md +++ b/documentation20/en/02.getting-started/docs.md @@ -10,7 +10,9 @@ Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo ### Install from Docker Container -Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html). +For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments. + +Please refer to the detailed operation in [Quickly experience TDengine through Docker](https://www.taosdata.com/en/documentation/getting-started/docker). ### Install from Package From 4a5d0f205fde8f9ee0146b652e34ab13e234f813 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 30 Aug 2021 18:30:03 +0800 Subject: [PATCH 29/71] [ci skip]update jenkinsfile --- Jenkinsfile | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6109e4811a..e9ea8bafd3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -256,13 +256,11 @@ pipeline { steps { pre_test() - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - timeout(time: 60, unit: 'MINUTES'){ - sh ''' - cd ${WKC}/tests/pytest - ./crash_gen.sh -a -p -t 4 -s 2000 - ''' - } + timeout(time: 60, unit: 'MINUTES'){ + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh -a -p -t 4 -s 2000 + ''' } timeout(time: 60, unit: 'MINUTES'){ // sh ''' From 061fe86a537beae5ab46a8cf21bf82ea44a546e7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 30 Aug 2021 18:51:31 +0800 Subject: [PATCH 30/71] Feature/sangshuduo/td 5875 taosdemo ue improve (#7703) * [TD-5875]: taosdemo show progress * empty commit for CI * better msg for create child table. * fix total child tables bug. * fix INF rps if spent 0 ms to insert. 
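The last bullet ("fix INF rps if spent 0 ms to insert") is a divide-by-zero guard: the diff below switches the start/end sampling to taosGetTimestampUs and clamps a zero elapsed time to 1 before computing records per second. The same logic isolated as a small sketch (hypothetical helper name):

```c
#include <stdio.h>
#include <stdint.h>

/* Sketch of the guard applied below: measure in microseconds and clamp
 * a zero elapsed time to 1 us, so rows / seconds can never print INF. */
static double records_per_second(uint64_t rows, int64_t start_us, int64_t end_us) {
    int64_t t = end_us - start_us;
    if (0 == t) t = 1;                       /* the patch's guard */
    double t_in_sec = (double)t / 1000000.0;
    return (double)rows / t_in_sec;
}

int main(void) {
    /* A sub-microsecond batch used to yield 0 ms elapsed and INF rps. */
    printf("%.2f records/second\n", records_per_second(30000, 1000, 1000));
    return 0;
}
```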
--- src/kit/taosdemo/taosdemo.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 05bfb25026..70ccbe0924 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -8398,7 +8398,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, free(stmtBuffer); - int64_t start = taosGetTimestampMs(); + int64_t start = taosGetTimestampUs(); for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); @@ -8442,18 +8442,18 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (cntDelay == 0) cntDelay = 1; avgDelay = (double)totalDelay / cntDelay; - int64_t end = taosGetTimestampMs(); + int64_t end = taosGetTimestampUs(); int64_t t = end - start; + if (0 == t) t = 1; - double tInMs = (double) t / 1000.0; + double tInMs = (double) t / 1000000.0; if (stbInfo) { fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, threads, db_name, stbInfo->sTblName, - (double) tInMs? - (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs)); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, @@ -8461,24 +8461,21 @@ static void startMultiThreadInsertData(int threads, char* db_name, tInMs, stbInfo->totalInsertRows, stbInfo->totalAffectedRows, threads, db_name, stbInfo->sTblName, - (tInMs)? - (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs)); } } else { fprintf(stderr, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, - (tInMs)? - (double)(g_args.totalInsertRows/tInMs):FLT_MAX); + (double)(g_args.totalInsertRows/tInMs)); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, "Spent %.4f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", tInMs, g_args.totalInsertRows, g_args.totalAffectedRows, threads, db_name, - (tInMs)? - (double)(g_args.totalInsertRows/tInMs):FLT_MAX); + (double)(g_args.totalInsertRows/tInMs)); } } From 570c8e5b1ec0e4054077722674b43a770e4c6f4a Mon Sep 17 00:00:00 2001 From: ZhangLianlei Date: Mon, 30 Aug 2021 22:28:54 +0800 Subject: [PATCH 31/71] [TD-7685]: add the missing connector which is also supported to the document. 
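The connector list this patch extends describes a single code path: whichever binding is used, it ultimately executes a plain SQL INSERT. A minimal C-client version of the very statement shown in the diff below, as a sketch (it assumes the docs' table d1001 already exists in a database named `power`, which is an assumption made for the example):

```c
#include <stdio.h>
#include <taos.h>   /* link with -ltaos */

int main(void) {
    TAOS *taos = taos_connect("localhost", "root", "taosdata", "power", 0);
    if (taos == NULL) return 1;

    /* The same record the docs insert into table d1001. */
    TAOS_RES *res = taos_query(taos,
        "INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31)");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
    } else {
        printf("affected rows: %d\n", taos_affected_rows(res));
    }
    taos_free_result(res);
    taos_close(taos);
    return 0;
}
```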
--- documentation20/cn/05.insert/docs.md | 2 +- documentation20/en/05.insert/docs.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index ce2d65e7d2..52e6d706fc 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -4,7 +4,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQ ## SQL写入 -应用通过C/C++, JDBC, GO, 或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: +应用通过C/C++, JDBC, GO, C#, 或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); ``` diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md index 88746ea608..6690babf77 100644 --- a/documentation20/en/05.insert/docs.md +++ b/documentation20/en/05.insert/docs.md @@ -4,7 +4,7 @@ TDengine supports multiple interfaces to write data, including SQL, Prometheus, ## SQL Writing -Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: +Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, C#, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001: ```mysql INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31); From d31c5f1dc86d0c3ac827165d83a3886bd170bcdd Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Mon, 30 Aug 2021 23:02:03 +0800 Subject: [PATCH 32/71] [TD-6449]: prefer english only in method documentations in python connector [ci skip] (#7711) --- src/connector/python/taos/cinterface.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 42dac3c2e8..a1b6fe312b 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -102,9 +102,7 @@ _libtaos.taos_get_client_info.restype = c_char_p def taos_get_client_info(): # type: () -> str - """Get client version info. - 获取客户端版本信息。 - """ + """Get client version info.""" return _libtaos.taos_get_client_info().decode() @@ -114,6 +112,7 @@ _libtaos.taos_get_server_info.argtypes = (c_void_p,) def taos_get_server_info(connection): # type: (c_void_p) -> str + """Get server version as string.""" return _libtaos.taos_get_server_info(connection).decode() @@ -134,11 +133,10 @@ _libtaos.taos_connect.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint1 def taos_connect(host=None, user="root", password="taosdata", db=None, port=0): # type: (None|str, str, str, None|str, int) -> c_void_p """Create TDengine database connection. 
- 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: - - host: server hostname/FQDN, TDengine管理主节点的FQDN - - user: user name/用户名 - - password: user password / 用户密码 + - host: server hostname/FQDN + - user: user name + - password: user password - db: database name (optional) - port: server port @@ -187,11 +185,10 @@ _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0): # type: (None|str, str, str, None|str, int) -> c_void_p - """ - 创建数据库连接,初始化连接上下文。其中需要用户提供的参数包含: + """Connect server with auth token. - - host: server hostname/FQDN, TDengine管理主节点的FQDN - - user: user name/用户名 + - host: server hostname/FQDN + - user: user name - auth: base64 encoded auth token - db: database name (optional) - port: server port From 2483985d4b81e888bff0a1aa7b2f4987e5e3c4f1 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Mon, 30 Aug 2021 23:04:52 +0800 Subject: [PATCH 33/71] [TD-6449]: fix python3.6 datetime parsing in pytest util package (#7704) --- tests/pytest/util/sql.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index dfe1e4a582..2b654a3793 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -21,7 +21,15 @@ import shutil import pandas as pd from util.log import * - +def _parse_datetime(timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass class TDSql: def __init__(self): @@ -181,7 +189,7 @@ class TDSql: tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) else: - if self.queryResult[row][col] == datetime.datetime.fromisoformat(data): + if self.queryResult[row][col] == _parse_datetime(data): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) return From 56776b62fe09a0cf7a46a20354b9fc056ab7ab3e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 31 Aug 2021 00:24:40 +0800 Subject: [PATCH 34/71] [TD-6448]: taosdemo stmt rand race. 
(#7719) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 173 +++++++++++++++--------------------- 1 file changed, 71 insertions(+), 102 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 70ccbe0924..29443f4fd4 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -291,7 +291,6 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; - char* sampleBindArray; //int sampleRowCount; //int sampleUsePos; @@ -438,7 +437,8 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; - int64_t *bind_ts; + char* sampleBindArray; + int64_t *bind_ts; int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -5738,20 +5738,6 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } - if (g_Dbs.db[i].superTbls[j].sampleBindArray) { - for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { - uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( - g_Dbs.db[i].superTbls[j].sampleBindArray - + sizeof(uintptr_t *) * k)); - for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) { - TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); - if (bind) - tmfree(bind->buffer); - } - tmfree((char *)tmp); - } - } - tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); @@ -6085,9 +6071,6 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) int32_t affectedRows; SSuperTable* stbInfo = pThreadInfo->stbInfo; - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, pThreadInfo->buffer); - uint16_t iface; if (stbInfo) iface = stbInfo->iface; @@ -6105,12 +6088,18 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) switch(iface) { case TAOSC_IFACE: + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, pThreadInfo->buffer); + affectedRows = queryDbExec( pThreadInfo->taos, pThreadInfo->buffer, INSERT_TYPE, false); break; case REST_IFACE: + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, pThreadInfo->buffer); + if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, pThreadInfo->buffer, pThreadInfo)) { affectedRows = -1; @@ -7088,12 +7077,12 @@ static int32_t prepareStbStmtBindRand( return 0; } -static int32_t prepareStbStmtBindWithSample( +static int32_t prepareStbStmtBindStartTime( + char *tableName, int64_t *ts, char *bindArray, SSuperTable *stbInfo, int64_t startTime, int32_t recSeq, - int32_t timePrec, - int64_t samplePos) + int32_t timePrec) { TAOS_BIND *bind; @@ -7110,6 +7099,10 @@ static int32_t prepareStbStmtBindWithSample( } else { *bind_ts = startTime + stbInfo->timeStampStep * recSeq; } + + verbosePrint("%s() LN%d, tableName: %s, bind_ts=%"PRId64"\n", + __func__, __LINE__, tableName, *bind_ts); + bind->buffer_length = sizeof(int64_t); bind->buffer = bind_ts; bind->length = &bind->buffer_length; @@ -7118,7 +7111,7 @@ static int32_t prepareStbStmtBindWithSample( return 0; } -static int32_t prepareStbStmtRand( +UNUSED_FUNC static int32_t prepareStbStmtRand( threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, @@ -7299,14 +7292,14 @@ static int32_t prepareStbStmtWithSample( uint32_t k; for (k = 0; k < batch;) { char *bindArray = (char *)(*((uintptr_t *) - (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + (pThreadInfo->sampleBindArray + 
(sizeof(char *)) * (*pSamplePos)))); /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBindWithSample( + if (-1 == prepareStbStmtBindStartTime( + tableName, pThreadInfo->bind_ts, bindArray, stbInfo, startTime, k, - pThreadInfo->time_precision, - *pSamplePos + pThreadInfo->time_precision /* is column */)) { return -1; } @@ -7427,8 +7420,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t nTimeStampStep; uint64_t insert_interval; - bool sourceRand; - SSuperTable* stbInfo = pThreadInfo->stbInfo; if (stbInfo) { @@ -7443,18 +7434,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { maxSqlLen = stbInfo->maxSqlLen; nTimeStampStep = stbInfo->timeStampStep; insert_interval = stbInfo->insertInterval; - if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; maxSqlLen = g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; - sourceRand = true; } debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", @@ -7539,25 +7524,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { - if (sourceRand) { - generated = prepareStbStmtRand( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime - ); - } else { - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime, - &(pThreadInfo->samplePos)); - } + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); } else { generated = generateStbInterlaceData( pThreadInfo, @@ -7747,17 +7721,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; - bool sourceRand; - if (stbInfo) { - if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } - } else { - sourceRand = true; - } - pThreadInfo->samplePos = 0; int percentComplete = 0; @@ -7796,32 +7759,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int32_t generated; if (stbInfo) { if (stbInfo->iface == STMT_IFACE) { - if (sourceRand) { -/* generated = prepareStbStmtRand( - pThreadInfo, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, - i, start_time - ); - */ - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); - } else { - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); - } + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); } else { generated = generateStbProgressiveData( stbInfo, @@ -7849,6 +7793,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { &remainderBufLen); } } + + verbosePrint("[%d] %s() LN%d generated=%d\n", + pThreadInfo->threadID, + __func__, __LINE__, generated); + if (generated > 0) i += generated; else @@ -8059,17 +8008,22 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } -static int 
parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) +static int parseSampleFileToStmt( + threadInfo *pThreadInfo, + SSuperTable *stbInfo, uint32_t timePrec) { - stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); - if (stbInfo->sampleBindArray == NULL) { + pThreadInfo->sampleBindArray = + calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + if (pThreadInfo->sampleBindArray == NULL) { errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", - __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + __func__, __LINE__, + (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); return -1; } for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { - char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + char *bindArray = + calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); if (bindArray == NULL) { errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", __func__, __LINE__, (stbInfo->columnCount + 1)); @@ -8122,7 +8076,8 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) free(bindBuffer); } } - *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray; + *((uintptr_t *)(pThreadInfo->sampleBindArray + (sizeof(char *)) * i)) = + (uintptr_t)bindArray; } return 0; @@ -8312,10 +8267,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, pstr += sprintf(pstr, ")"); debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); - - if (stbInfo) { - parseSampleFileToStmt(stbInfo, timePrec); - } } for (int i = 0; i < threads; i++) { @@ -8348,7 +8299,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, || ((stbInfo) && (stbInfo->iface == STMT_IFACE))) { - pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); if (NULL == pThreadInfo->stmt) { free(pids); @@ -8370,6 +8320,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(EXIT_FAILURE); } pThreadInfo->bind_ts = malloc(sizeof(int64_t)); + + if (stbInfo) { + parseSampleFileToStmt(pThreadInfo, stbInfo, timePrec); + } } } else { pThreadInfo->taos = NULL; @@ -8420,6 +8374,21 @@ static void startMultiThreadInsertData(int threads, char* db_name, tsem_destroy(&(pThreadInfo->lock_sem)); taos_close(pThreadInfo->taos); + if (pThreadInfo->sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + pThreadInfo->sampleBindArray + + sizeof(uintptr_t *) * k)); + for (int c = 1; c < pThreadInfo->stbInfo->columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + tmfree(pThreadInfo->sampleBindArray); + } + debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", __func__, __LINE__, pThreadInfo->threadID, pThreadInfo->totalInsertRows, From 8f32f23de9c73b9106eda6780503b164aabd4064 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Tue, 31 Aug 2021 09:29:52 +0800 Subject: [PATCH 35/71] [TD-6432]: fix batchFetch BufferUnderflowException (#7675) --- src/client/src/TSDBJNIConnector.c | 202 +++++++++++------- .../taosdata/jdbc/TSDBResultSetBlockData.java | 10 +- 2 files changed, 127 insertions(+), 85 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 7ba613de88..506c8d64b9 100644 --- a/src/client/src/TSDBJNIConnector.c +++ 
b/src/client/src/TSDBJNIConnector.c @@ -20,12 +20,42 @@ #include "com_taosdata_jdbc_TSDBJNIConnector.h" -#define jniFatal(...) { if (jniDebugFlag & DEBUG_FATAL) { taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }} -#define jniError(...) { if (jniDebugFlag & DEBUG_ERROR) { taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }} -#define jniWarn(...) { if (jniDebugFlag & DEBUG_WARN) { taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }} -#define jniInfo(...) { if (jniDebugFlag & DEBUG_INFO) { taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); }} -#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }} -#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }} +#define jniFatal(...) \ + { \ + if (jniDebugFlag & DEBUG_FATAL) { \ + taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \ + } \ + } +#define jniError(...) \ + { \ + if (jniDebugFlag & DEBUG_ERROR) { \ + taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \ + } \ + } +#define jniWarn(...) \ + { \ + if (jniDebugFlag & DEBUG_WARN) { \ + taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \ + } \ + } +#define jniInfo(...) \ + { \ + if (jniDebugFlag & DEBUG_INFO) { \ + taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \ + } \ + } +#define jniDebug(...) \ + { \ + if (jniDebugFlag & DEBUG_DEBUG) { \ + taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \ + } \ + } +#define jniTrace(...) \ + { \ + if (jniDebugFlag & DEBUG_TRACE) { \ + taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \ + } \ + } int __init = 0; @@ -60,14 +90,14 @@ jmethodID g_blockdataSetByteArrayFp; jmethodID g_blockdataSetNumOfRowsFp; jmethodID g_blockdataSetNumOfColsFp; -#define JNI_SUCCESS 0 -#define JNI_TDENGINE_ERROR -1 +#define JNI_SUCCESS 0 +#define JNI_TDENGINE_ERROR -1 #define JNI_CONNECTION_NULL -2 #define JNI_RESULT_SET_NULL -3 #define JNI_NUM_OF_FIELDS_0 -4 -#define JNI_SQL_NULL -5 -#define JNI_FETCH_END -6 -#define JNI_OUT_OF_MEMORY -7 +#define JNI_SQL_NULL -5 +#define JNI_FETCH_END -6 +#define JNI_OUT_OF_MEMORY -7 static void jniGetGlobalMethod(JNIEnv *env) { // make sure init function executed once @@ -129,13 +159,13 @@ static void jniGetGlobalMethod(JNIEnv *env) { } static int32_t check_for_params(jobject jobj, jlong conn, jlong res) { - if ((TAOS*) conn == NULL) { + if ((TAOS *)conn == NULL) { jniError("jobj:%p, connection is closed", jobj); return JNI_CONNECTION_NULL; } - if ((TAOS_RES *) res == NULL) { - jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn); + if ((TAOS_RES *)res == NULL) { + jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS *)conn); return JNI_RESULT_SET_NULL; } @@ -216,7 +246,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost, jint jport, jstring jdbName, jstring juser, jstring jpass) { - jlong ret = 0; + jlong ret = 0; const char *host = NULL; const char *user = NULL; const char *pass = NULL; @@ -246,7 +276,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn jniDebug("jobj:%p, pass not specified, use default password", jobj); } - ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport); + ret = (jlong)taos_connect((char 
*)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport); if (ret == 0) { jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, (char *)host, (char *)user, (char *)dbname, (int32_t)jport); @@ -289,7 +319,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp( jsize len = (*env)->GetArrayLength(env, jsql); - char *str = (char *) calloc(1, sizeof(char) * (len + 1)); + char *str = (char *)calloc(1, sizeof(char) * (len + 1)); if (str == NULL) { jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon); return JNI_OUT_OF_MEMORY; @@ -315,16 +345,17 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp( } free(str); - return (jlong) pSql; + return (jlong)pSql; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, + jlong tres) { int32_t code = check_for_params(jobj, con, tres); if (code != JNI_SUCCESS) { return code; } - return (jint)taos_errno((TAOS_RES*) tres); + return (jint)taos_errno((TAOS_RES *)tres); } JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) { @@ -334,7 +365,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) { - TAOS *tscon = (TAOS *)con; + TAOS * tscon = (TAOS *)con; int32_t code = check_for_params(jobj, con, tres); if (code != JNI_SUCCESS) { return code; @@ -359,7 +390,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp( SSqlObj *pSql = (TAOS_RES *)tres; - return (tscIsUpdateQuery(pSql)? 1:0); + return (tscIsUpdateQuery(pSql) ? 
1 : 0); } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con, @@ -370,21 +401,22 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp( } taos_free_result((void *)res); - jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res); + jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS *)con, (void *)res); return JNI_SUCCESS; } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con, jlong res) { - TAOS *tscon = (TAOS *)con; + TAOS * tscon = (TAOS *)con; int32_t code = check_for_params(jobj, con, res); if (code != JNI_SUCCESS) { return code; } jint ret = taos_affected_rows((SSqlObj *)res); - jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret); + jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, + (int32_t)ret); return ret; } @@ -392,13 +424,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp(JNIEnv *env, jobject jobj, jlong con, jlong res, jobject arrayListObj) { - TAOS *tscon = (TAOS *)con; + TAOS * tscon = (TAOS *)con; int32_t code = check_for_params(jobj, con, res); if (code != JNI_SUCCESS) { return code; } - TAOS_RES* tres = (TAOS_RES*) res; + TAOS_RES * tres = (TAOS_RES *)res; TAOS_FIELD *fields = taos_fetch_fields(tres); int32_t num_fields = taos_num_fields(tres); @@ -452,7 +484,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn int32_t numOfFields = taos_num_fields(result); if (numOfFields == 0) { - jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields); + jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void *)res, numOfFields); return JNI_NUM_OF_FIELDS_0; } @@ -460,7 +492,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn if (row == NULL) { int code = taos_errno(result); if (code == TSDB_CODE_SUCCESS) { - jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields); + jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void *)res, + numOfFields); return JNI_FETCH_END; } else { jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon); @@ -468,7 +501,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn } } - int32_t* length = taos_fetch_lengths(result); + int32_t *length = taos_fetch_lengths(result); char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; @@ -533,7 +566,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con, - jlong res, jobject rowobj) { + jlong res, jobject rowobj) { TAOS * tscon = (TAOS *)con; int32_t code = check_for_params(jobj, con, res); if (code != JNI_SUCCESS) { @@ -564,8 +597,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI (*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields); for (int i = 0; i < numOfFields; i++) { - (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows, - jniFromNCharToByteArray(env, (char *)row[i], 
fields[i].bytes * numOfRows)); + int bytes = fields[i].bytes; + + if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_NCHAR) { + bytes += 2; + } + (*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, bytes * numOfRows, + jniFromNCharToByteArray(env, (char *)row[i], bytes * numOfRows)); } return JNI_SUCCESS; @@ -585,7 +623,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm } JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con, - jboolean restart, jstring jtopic, jstring jsql, jint jinterval) { + jboolean restart, jstring jtopic, + jstring jsql, jint jinterval) { jlong sub = 0; TAOS *taos = (TAOS *)con; char *topic = NULL; @@ -682,8 +721,8 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(J * @param res the TAOS_RES object, i.e. the SSqlObject * @return precision 0:ms 1:us 2:ns */ -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj, jlong con, - jlong res) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrecisionImp(JNIEnv *env, jobject jobj, + jlong con, jlong res) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection is closed", jobj); @@ -699,7 +738,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultTimePrec return taos_result_precision(result); } -JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, jbyteArray jsql, jlong con) { +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(JNIEnv *env, jobject jobj, + jbyteArray jsql, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -713,7 +753,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J jsize len = (*env)->GetArrayLength(env, jsql); - char *str = (char *) calloc(1, sizeof(char) * (len + 1)); + char *str = (char *)calloc(1, sizeof(char) * (len + 1)); if (str == NULL) { jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon); return JNI_OUT_OF_MEMORY; @@ -724,25 +764,27 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J // todo handle error } - TAOS_STMT* pStmt = taos_stmt_init(tscon); - int32_t code = taos_stmt_prepare(pStmt, str, len); + TAOS_STMT *pStmt = taos_stmt_init(tscon); + int32_t code = taos_stmt_prepare(pStmt, str, len); tfree(str); if (code != TSDB_CODE_SUCCESS) { jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code)); return JNI_TDENGINE_ERROR; } - return (jlong) pStmt; + return (jlong)pStmt; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, jlong stmt, jstring jname, jlong conn) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameImp(JNIEnv *env, jobject jobj, + jlong stmt, jstring jname, + jlong conn) { TAOS *tsconn = (TAOS *)conn; if (tsconn == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } - TAOS_STMT* pStmt = (TAOS_STMT*) stmt; + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; if (pStmt == NULL) { jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn); return JNI_SQL_NULL; @@ -750,7 +792,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI const char *name = (*env)->GetStringUTFChars(env, jname, NULL); - int32_t 
code = taos_stmt_set_tbname((void*)stmt, name); + int32_t code = taos_stmt_set_tbname((void *)stmt, name); if (code != TSDB_CODE_SUCCESS) { (*env)->ReleaseStringUTFChars(env, jname, name); @@ -763,8 +805,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI return JNI_SUCCESS; } -JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(JNIEnv *env, jobject jobj, jlong stmt, - jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp( + JNIEnv *env, jobject jobj, jlong stmt, jbyteArray colDataList, jbyteArray lengthList, jbyteArray nullList, + jint dataType, jint dataBytes, jint numOfRows, jint colIndex, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); @@ -798,14 +841,14 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J } // bind multi-rows with only one invoke. - TAOS_MULTI_BIND* b = calloc(1, sizeof(TAOS_MULTI_BIND)); + TAOS_MULTI_BIND *b = calloc(1, sizeof(TAOS_MULTI_BIND)); - b->num = numOfRows; - b->buffer_type = dataType; // todo check data type - b->buffer_length = IS_VAR_DATA_TYPE(dataType)? dataBytes:tDataTypes[dataType].bytes; - b->is_null = nullArray; - b->buffer = colBuf; - b->length = (int32_t*)lengthArray; + b->num = numOfRows; + b->buffer_type = dataType; // todo check data type + b->buffer_length = IS_VAR_DATA_TYPE(dataType) ? dataBytes : tDataTypes[dataType].bytes; + b->is_null = nullArray; + b->buffer = colBuf; + b->length = (int32_t *)lengthArray; // set the length and is_null array if (!IS_VAR_DATA_TYPE(dataType)) { @@ -829,14 +872,15 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(J return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, jlong con) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } - TAOS_STMT *pStmt = (TAOS_STMT*) stmt; + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; if (pStmt == NULL) { jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); return JNI_SQL_NULL; @@ -853,14 +897,15 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, jlong con) { +JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv *env, jobject jobj, jlong stmt, + jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } - TAOS_STMT *pStmt = (TAOS_STMT*) stmt; + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; if (pStmt == NULL) { jniError("jobj:%p, conn:%p, invalid stmt", jobj, tscon); return JNI_SQL_NULL; @@ -876,15 +921,16 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv return JNI_SUCCESS; } -JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp(JNIEnv *env, jobject jobj, - jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList, jbyteArray lengthList, jbyteArray nullList, jlong conn) { 
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp( + JNIEnv *env, jobject jobj, jlong stmt, jstring tableName, jint numOfTags, jbyteArray tags, jbyteArray typeList, + jbyteArray lengthList, jbyteArray nullList, jlong conn) { TAOS *tsconn = (TAOS *)conn; if (tsconn == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } - TAOS_STMT* pStmt = (TAOS_STMT*) stmt; + TAOS_STMT *pStmt = (TAOS_STMT *)stmt; if (pStmt == NULL) { jniError("jobj:%p, conn:%p, invalid stmt handle", jobj, tsconn); return JNI_SQL_NULL; @@ -898,39 +944,39 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI } len = (*env)->GetArrayLength(env, lengthList); - int64_t *lengthArray = (int64_t*) calloc(1, len); - (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte*) lengthArray); + int64_t *lengthArray = (int64_t *)calloc(1, len); + (*env)->GetByteArrayRegion(env, lengthList, 0, len, (jbyte *)lengthArray); if ((*env)->ExceptionCheck(env)) { } len = (*env)->GetArrayLength(env, typeList); - char *typeArray = (char*) calloc(1, len); - (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte*) typeArray); + char *typeArray = (char *)calloc(1, len); + (*env)->GetByteArrayRegion(env, typeList, 0, len, (jbyte *)typeArray); if ((*env)->ExceptionCheck(env)) { } len = (*env)->GetArrayLength(env, nullList); - int32_t *nullArray = (int32_t*) calloc(1, len); - (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte*) nullArray); + int32_t *nullArray = (int32_t *)calloc(1, len); + (*env)->GetByteArrayRegion(env, nullList, 0, len, (jbyte *)nullArray); if ((*env)->ExceptionCheck(env)) { } const char *name = (*env)->GetStringUTFChars(env, tableName, NULL); - char* curTags = tagsData; + char * curTags = tagsData; TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND)); - for(int32_t i = 0; i < numOfTags; ++i) { + for (int32_t i = 0; i < numOfTags; ++i) { tagsBind[i].buffer_type = typeArray[i]; - tagsBind[i].buffer = curTags; + tagsBind[i].buffer = curTags; tagsBind[i].is_null = &nullArray[i]; - tagsBind[i].length = (uintptr_t*) &lengthArray[i]; + tagsBind[i].length = (uintptr_t *)&lengthArray[i]; curTags += lengthArray[i]; } - int32_t code = taos_stmt_set_tbname_tags((void*)stmt, name, tagsBind); + int32_t code = taos_stmt_set_tbname_tags((void *)stmt, name, tagsBind); - int32_t nTags = (int32_t) numOfTags; + int32_t nTags = (int32_t)numOfTags; jniDebug("jobj:%p, conn:%p, set table name:%s, numOfTags:%d", jobj, tsconn, name, nTags); tfree(tagsData); @@ -948,28 +994,28 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI } JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj, - jobjectArray lines, jlong conn) { + jobjectArray lines, jlong conn) { TAOS *taos = (TAOS *)conn; if (taos == NULL) { jniError("jobj:%p, connection already closed", jobj); return JNI_CONNECTION_NULL; } - int numLines = (*env)->GetArrayLength(env, lines); - char** c_lines = calloc(numLines, sizeof(char*)); + int numLines = (*env)->GetArrayLength(env, lines); + char **c_lines = calloc(numLines, sizeof(char *)); if (c_lines == NULL) { jniError("c_lines:%p, alloc memory failed", c_lines); return JNI_OUT_OF_MEMORY; } for (int i = 0; i < numLines; ++i) { - jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i)); - c_lines[i] = (char*)(*env)->GetStringUTFChars(env, line, 0); + jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i)); + c_lines[i] = (char 
*)(*env)->GetStringUTFChars(env, line, 0); } int code = taos_insert_lines(taos, c_lines, numLines); for (int i = 0; i < numLines; ++i) { - jstring line = (jstring) ((*env)->GetObjectArrayElement(env, lines, i)); + jstring line = (jstring)((*env)->GetObjectArrayElement(env, lines, i)); (*env)->ReleaseStringUTFChars(env, line, c_lines[i]); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java index 6211f61dc5..ff49677b01 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java @@ -32,6 +32,7 @@ import java.util.List; import com.taosdata.jdbc.utils.NullType; public class TSDBResultSetBlockData { + private static final int BINARY_LENGTH_OFFSET = 2; private int numOfRows = 0; private int rowIndex = 0; @@ -404,10 +405,8 @@ public class TSDBResultSetBlockData { case TSDBConstants.TSDB_DATA_TYPE_BINARY: { ByteBuffer bb = (ByteBuffer) this.colData.get(col); - bb.position(fieldSize * this.rowIndex); - + bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex); int length = bb.getShort(); - byte[] dest = new byte[length]; bb.get(dest, 0, length); if (NullType.isBinaryNull(dest, length)) { @@ -419,16 +418,13 @@ public class TSDBResultSetBlockData { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: { ByteBuffer bb = (ByteBuffer) this.colData.get(col); - bb.position(fieldSize * this.rowIndex); - + bb.position((fieldSize + BINARY_LENGTH_OFFSET) * this.rowIndex); int length = bb.getShort(); - byte[] dest = new byte[length]; bb.get(dest, 0, length); if (NullType.isNcharNull(dest, length)) { return null; } - try { String charset = TaosGlobalConfig.getCharset(); return new String(dest, charset); From f61eb53f1bed7892423ad00f803f74a819bd9c2c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 31 Aug 2021 10:58:20 +0800 Subject: [PATCH 36/71] Hotfix/sangshuduo/td 6294 taosdemo long arg (#7722) * [TD-6294]: taosdemo support long arg fix conflict with develop branch. * fix few words. * declare default child tables number. * add support email prompt. * support the way no space between param and value * fix uncatched status. * fix -PP arg. * fix total child tables bug. * fix taosdemo long args bug. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 120 +++++++++++++++++++----------------- 1 file changed, 63 insertions(+), 57 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 29443f4fd4..9ae70b384b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -754,12 +754,11 @@ static void printHelp() { "Set the replica parameters of the database, Default 1, min: 1, max: 3."); printf("%s%s%s%s\n", indent, "-m, --table-prefix=TABLEPREFIX", "\t", "Table prefix name. Default is 'd'."); - printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", "The select sql file."); + printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", + "The select sql file."); printf("%s%s%s%s\n", indent, "-N, --normal-table", "\t\t", "Use normal table flag."); printf("%s%s%s%s\n", indent, "-o, --output=FILE", "\t\t", "Direct output to the named file. Default is './output.txt'."); - printf("%s%s%s%s\n", indent, "-s, --sql-file=FILE", "\t\t", - "The select sql file."); printf("%s%s%s%s\n", indent, "-q, --query-mode=MODE", "\t\t", "Query mode -- 0: SYNC, 1: ASYNC. 
Default is SYNC."); printf("%s%s%s%s\n", indent, "-b, --data-type=DATATYPE", "\t", @@ -831,6 +830,12 @@ static bool isStringNumber(char *input) return true; } +static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) +{ + fprintf(stderr, "%s %s: %s is an invalid value\n", program, wrong_arg, wrong_value); + fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); +} + static void errorUnreconized(char *program, char *wrong_arg) { fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); @@ -900,7 +905,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); } else if (0 == strncmp(argv[i], "-c", strlen("-c"))) { - tstrncpy(configDir, (char *)(argv[i] + strlen("-")), TSDB_FILENAME_LEN); + tstrncpy(configDir, (char *)(argv[i] + strlen("-c")), TSDB_FILENAME_LEN); } else if (strlen("--config-dir") == strlen(argv[i])) { if (argc == i+1) { errorPrintReqArg3(argv[0], "--config-dir"); @@ -983,7 +988,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strcasecmp(argv[i+1], "stmt")) { arguments->iface = STMT_IFACE; } else { - errorPrintReqArg(argv[0], "I"); + errorWrongValue(argv[0], "-I", argv[i+1]); exit(EXIT_FAILURE); } i++; @@ -1006,7 +1011,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strcasecmp((char *)(argv[i] + strlen("-I")), "stmt")) { arguments->iface = STMT_IFACE; } else { - errorPrintReqArg3(argv[0], "-I"); + errorWrongValue(argv[0], "-I", + (char *)(argv[i] + strlen("-I"))); exit(EXIT_FAILURE); } } else if (strlen("--interface") == strlen(argv[i])) { @@ -1021,7 +1027,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strcasecmp(argv[i+1], "stmt")) { arguments->iface = STMT_IFACE; } else { - errorPrintReqArg3(argv[0], "--interface"); + errorWrongValue(argv[0], "--interface", argv[i+1]); exit(EXIT_FAILURE); } i++; @@ -1094,9 +1100,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else if (0 == strncmp(argv[i], "--sql-file=", strlen("--sql-file="))) { - arguments->host = (char *)(argv[i++] + strlen("--sql-file=")); + arguments->sqlFile = (char *)(argv[i++] + strlen("--sql-file=")); } else if (0 == strncmp(argv[i], "-s", strlen("-s"))) { - arguments->host = (char *)(argv[i++] + strlen("-s")); + arguments->sqlFile = (char *)(argv[i++] + strlen("-s")); } else if (strlen("--sql-file") == strlen(argv[i])) { if (argc == i+1) { errorPrintReqArg3(argv[0], "--sql-file"); @@ -1644,6 +1650,54 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->debug_print = true; } else if (strcmp(argv[i], "-gg") == 0) { arguments->verbose_print = true; + } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) + || (0 == strncmp(argv[i], "--disorder-range", + strlen("--disorder-range")))) { + if (strlen("-R") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg(argv[0], "R"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "R"); + exit(EXIT_FAILURE); + } + arguments->disorderRange = atoi(argv[++i]); + } else if (0 == strncmp(argv[i], "--disorder-range=", + strlen("--disorder-range="))) { + if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) { + arguments->disorderRange = + atoi((char *)(argv[i]+strlen("--disorder-range="))); + } else { + errorPrintReqArg2(argv[0], "--disorder-range"); + 
exit(EXIT_FAILURE); + } + } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { + if (isStringNumber((char *)(argv[i] + strlen("-R")))) { + arguments->disorderRange = + atoi((char *)(argv[i]+strlen("-R"))); + } else { + errorPrintReqArg2(argv[0], "-R"); + exit(EXIT_FAILURE); + } + + if (arguments->disorderRange < 0) { + errorPrint("Invalid disorder range %d, will be set to %d\n", + arguments->disorderRange, 1000); + arguments->disorderRange = 1000; + } + } else if (strlen("--disorder-range") == strlen(argv[i])) { + if (argc == i+1) { + errorPrintReqArg3(argv[0], "--disorder-range"); + exit(EXIT_FAILURE); + } else if (!isStringNumber(argv[i+1])) { + errorPrintReqArg2(argv[0], "--disorder-range"); + exit(EXIT_FAILURE); + } + arguments->disorderRange = atoi(argv[++i]); + } else { + errorUnreconized(argv[0], argv[i]); + exit(EXIT_FAILURE); + } } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) || (0 == strncmp(argv[i], "--disorder", strlen("--disorder")))) { if (2 == strlen(argv[i])) { @@ -1694,54 +1748,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->disorderRatio, 0); arguments->disorderRatio = 0; } - } else if ((0 == strncmp(argv[i], "-R", strlen("-R"))) - || (0 == strncmp(argv[i], "--disorder-range", - strlen("--disorder-range")))) { - if (2 == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg(argv[0], "R"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "R"); - exit(EXIT_FAILURE); - } - arguments->disorderRange = atoi(argv[++i]); - } else if (0 == strncmp(argv[i], "--disorder-range=", - strlen("--disorder-range="))) { - if (isStringNumber((char *)(argv[i] + strlen("--disorder-range=")))) { - arguments->disorderRange = - atoi((char *)(argv[i]+strlen("--disorder-rnage="))); - } else { - errorPrintReqArg2(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } - } else if (0 == strncmp(argv[i], "-R", strlen("-R"))) { - if (isStringNumber((char *)(argv[i] + strlen("-R")))) { - arguments->disorderRange = - atoi((char *)(argv[i]+strlen("-R"))); - } else { - errorPrintReqArg2(argv[0], "-R"); - exit(EXIT_FAILURE); - } - - if (arguments->disorderRange < 0) { - errorPrint("Invalid disorder range %d, will be set to %d\n", - arguments->disorderRange, 1000); - arguments->disorderRange = 1000; - } - } else if (strlen("--disorder-range") == strlen(argv[i])) { - if (argc == i+1) { - errorPrintReqArg3(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } else if (!isStringNumber(argv[i+1])) { - errorPrintReqArg2(argv[0], "--disorder-range"); - exit(EXIT_FAILURE); - } - arguments->disorderRange = atoi(argv[++i]); - } else { - errorUnreconized(argv[0], argv[i]); - exit(EXIT_FAILURE); - } } else if ((0 == strncmp(argv[i], "-a", strlen("-a"))) || (0 == strncmp(argv[i], "--replica", strlen("--replica")))) { From 8f383b6f4324de1a10b23a3254ff963092060cbc Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 31 Aug 2021 13:38:38 +0800 Subject: [PATCH 37/71] [TD-6481] The establishment of TCP connection failed due to too high concurrent query --- src/util/src/tsocket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 77941cba82..8d69a87e77 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -488,7 +488,7 @@ SOCKET taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { return -1; } - if (listen(sockFd, 10) < 0) { + if (listen(sockFd, 1024) < 0) { uError("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); 
taosCloseSocket(sockFd); return -1; From bfccafc73aea5b5b05808c654e9708c9f02285c1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 31 Aug 2021 13:46:09 +0800 Subject: [PATCH 38/71] Hotfix/sangshuduo/td 6294 taosdemo long arg (#7729) * [TD-6294]: taosdemo support long arg fix conflict with develop branch. * fix few words. * declare default child tables number. * add support email prompt. * support the way no space between param and value * fix uncatched status. * fix -PP arg. * fix total child tables bug. * fix taosdemo long args bug. * fix default column number Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9ae70b384b..87102cc1c7 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -105,7 +105,7 @@ extern char configDir[]; #define DEFAULT_TIMESTAMP_STEP 1 #define DEFAULT_INTERLACE_ROWS 0 -#define DEFAULT_DATATYPE_NUM 3 +#define DEFAULT_DATATYPE_NUM 1 #define DEFAULT_CHILDTABLES 10000 From c7ded59437810f93d65510439eb5f89bd59be838 Mon Sep 17 00:00:00 2001 From: zhangqingqing Date: Tue, 31 Aug 2021 15:23:40 +0800 Subject: [PATCH 39/71] keep all "first end point"s' notation to "firstEp" --- documentation20/cn/10.cluster/docs.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index f39138d61d..b55dbc6944 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -79,13 +79,13 @@ Query OK, 1 row(s) in set (0.006385s) taos> ``` -上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEP。 +上述命令里,可以看到这个刚启动的这个数据节点的End Point是:h1.taos.com:6030,就是这个新集群的firstEp。 ## 启动后续数据节点 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEp参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) 2. 
在第一个数据节点,使用CLI程序taos,登录进TDengine系统,执行命令: @@ -110,7 +110,7 @@ taos> **提示:** -- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEP。 +- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。 - firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。 - 接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。 - 两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 From c547b894d9cb89b32c42be4fa4459f697571923f Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 31 Aug 2021 15:38:18 +0800 Subject: [PATCH 40/71] [TD-6468]: add test case for jdbc stmt --- .../jdbc/TSDBPreparedStatementTest.java | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 6bddd3f428..3d76e1f98d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -586,6 +586,130 @@ public class TSDBPreparedStatementTest { Assert.assertEquals(numOfRows, rows); } + @Test + public void bindDataQueryTest() throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)"); + s.setTableName("w2"); + s.setTagInt(0, 1); + s.setTagString(1, "test"); + + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add("test" + i % 4); + } + s.setString(1, s2, 10); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + + String sql = "select * from weather_test where t1 >= ? and t1 <= ?"; + TSDBPreparedStatement s1 = (TSDBPreparedStatement) conn.prepareStatement(sql); + s1.setInt(1, 0); + s1.setInt(2, 10); + + ResultSet rs = s1.executeQuery(); + int rows = 0; + while (rs.next()) { + rows++; + } + Assert.assertEquals(numOfRows, rows); + } + + @Test + public void setTagNullTest()throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 bool, t8 binary(10), t9 nchar(10))"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?,?,?,?,?,?,?,?) 
values(?, ?)"); + s.setTableName("w3"); + s.setTagNull(0, TSDBConstants.TSDB_DATA_TYPE_TINYINT); + s.setTagNull(1, TSDBConstants.TSDB_DATA_TYPE_SMALLINT); + s.setTagNull(2, TSDBConstants.TSDB_DATA_TYPE_INT); + s.setTagNull(3, TSDBConstants.TSDB_DATA_TYPE_BIGINT); + s.setTagNull(4, TSDBConstants.TSDB_DATA_TYPE_FLOAT); + s.setTagNull(5, TSDBConstants.TSDB_DATA_TYPE_DOUBLE); + s.setTagNull(6, TSDBConstants.TSDB_DATA_TYPE_BOOL); + s.setTagNull(7, TSDBConstants.TSDB_DATA_TYPE_BINARY); + s.setTagNull(8, TSDBConstants.TSDB_DATA_TYPE_NCHAR); + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add(i); + } + s.setInt(1, s2); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + } + + private String stringGenerator(int length) { + String source = "abcdefghijklmnopqrstuvwxyz"; + StringBuilder sb = new StringBuilder(); + Random rand = new Random(); + for(int i = 0; i < length; i++) { + sb.append(source.charAt(rand.nextInt(26))); + } + return sb.toString(); + } + + @Test(expected = SQLException.class) + public void setMaxTableNameTest()throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, c1 int) tags (t1 int)"); + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?)"); + String tbname = stringGenerator(193); + s.setTableName(tbname); + s.setTagInt(0, 1); + + int numOfRows = 1; + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add(i); + } + s.setInt(1, s2); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + } + + @Test(expected = SQLException.class) public void createTwoSameDbTest() throws SQLException { // when From d1a532257597f0c2b1755803d99fb8bfdd118df7 Mon Sep 17 00:00:00 2001 From: zhangqingqing Date: Tue, 31 Aug 2021 15:47:16 +0800 Subject: [PATCH 41/71] change support description of tdengine's connectors under different platforms. --- documentation20/cn/08.connector/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index ea3f9a4d55..6acf3e33f8 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -17,7 +17,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | -其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。 +其中 ● 表示官方测试验证通过, ○ 表示非官方测试验证通过,--表示未经验证。 注意: From 84a42afacdd4d6d99ee29a270c269f2752ede4bb Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 31 Aug 2021 16:30:24 +0800 Subject: [PATCH 42/71] [TD-2639] : describe operator "is null / is not null". 
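
For context, a minimal sketch of exercising the operator documented here through
the TDengine C client. The database, table, and column names (test, meters,
groupid) are illustrative assumptions, not taken from this patch:

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
      if (conn == NULL) return 1;

      /* Per the docs change below, IS [NOT] NULL is accepted on columns of all types. */
      TAOS_RES *res = taos_query(conn, "SELECT ts, current FROM meters WHERE groupid IS NOT NULL");
      if (taos_errno(res) != 0) {
        printf("query failed: %s\n", taos_errstr(res));
      } else {
        int rows = 0;
        while (taos_fetch_row(res) != NULL) rows++;  /* count matching rows */
        printf("%d rows have a non-NULL groupid\n", rows);
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }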
--- documentation20/cn/08.connector/01.java/docs.md | 2 +- documentation20/cn/08.connector/docs.md | 4 ++-- documentation20/cn/11.administrator/docs.md | 4 ++-- documentation20/cn/12.taos-sql/docs.md | 3 ++- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index edd81a49cd..def8d4a905 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -46,7 +46,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 -注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.1.8.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如: +注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.2.0.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如: ```sql INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); ``` diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index ea3f9a4d55..f5411bf80a 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -654,7 +654,7 @@ conn.close() 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 -注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.1.8.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。) +注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.2.0.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。) ### 安装 @@ -695,7 +695,7 @@ http://:/rest/sql/[db_name] - fqnd: 集群中的任一台主机 FQDN 或 IP 地址 - port: 配置文件中 httpPort 配置项,缺省为 6041 -- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.1.8.0 版本开始支持) +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) 例如:http://h1.taos.com:6041/rest/sql/test 是指向地址为 h1.taos.com:6041 的 url,并将默认使用的数据库库名设置为 test。 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 29e49aa902..99953233f0 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -800,7 +800,7 @@ taos -n sync -P 6042 -h `taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP` -从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: +从 2.2.0.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: -n:设为“speed”时,表示对网络速度进行诊断。 -h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 @@ -813,7 +813,7 @@ taos -n sync -P 6042 -h `taos -n fqdn -h ` -从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: +从 2.2.0.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: -n:设为“fqdn”时,表示对 FQDN 解析进行诊断。 -h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 9552a8fb2c..52ad98dcda 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -713,6 +713,7 @@ Query OK, 1 row(s) in set (0.001091s) | <= | smaller than or equal to | **`timestamp`** and all numeric types | | = | equal to | all types | | <> | not equal to 
| all types | +| is [not] null | is null or is not null | all types | | between and | within a certain range | **`timestamp`** and all numeric types | | in | match any value in a set | all types except first column `timestamp` | | like | match a wildcard string | **`binary`** **`nchar`** | @@ -722,7 +723,7 @@ Query OK, 1 row(s) in set (0.001091s) 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. like 算子使用通配符字符串进行匹配检查。 * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。 - * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.1.8.0 版本开始支持) + * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持) * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) 3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 From c014347dc2a74c6811a4efa35c8ecf2175dc1a39 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 31 Aug 2021 17:06:49 +0800 Subject: [PATCH 43/71] [TS-2] : describe nested query. --- documentation20/cn/12.taos-sql/docs.md | 28 ++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 52ad98dcda..93f5e8f56c 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -717,8 +717,6 @@ Query OK, 1 row(s) in set (0.001091s) | between and | within a certain range | **`timestamp`** and all numeric types | | in | match any value in a set | all types except first column `timestamp` | | like | match a wildcard string | **`binary`** **`nchar`** | -| % | match with any char sequences | **`binary`** **`nchar`** | -| _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. like 算子使用通配符字符串进行匹配检查。 @@ -730,6 +728,32 @@ Query OK, 1 row(s) in set (0.001091s) 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 + +### 嵌套查询 + +“嵌套查询”又称为“子查询”,也即在一条 SQL 语句中,“内层查询”的计算结果可以作为“外层查询”的计算对象来使用。 + +从 2.2.0.0 版本开始,TDengine 的查询引擎开始支持在 FROM 子句中使用非关联子查询(“非关联”的意思是,子查询不会用到父查询中的参数)。也即在普通 SELECT 语句的 tb_name_list 位置,用一个独立的 SELECT 语句来代替(这一 SELECT 语句被包含在英文圆括号内),于是完整的嵌套查询 SQL 语句形如: + +```mysql +SELECT ... FROM (SELECT ... FROM ...) ...; +``` + +说明: +1. 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。 +2. 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。 +3. 目前不能在“连续查询”功能中使用子查询。 +4. 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。 +5. 目前内层查询、外层查询均不支持 UNION 操作。 +6. 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。 + * 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。 +7. 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制: + * 计算函数部分: + 1. 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。 + 2. 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。 + * 外层查询中不支持 IN 算子,但在内层中可以使用。 + * 外层查询不支持 GROUP BY。 + ### UNION ALL 操作符 From 4d9f80d1558dc42dd41cd73cb727f92d3c2c90eb Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 31 Aug 2021 17:46:27 +0800 Subject: [PATCH 44/71] [TD-2581] : support 'OR' join query conditions between columns. 
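
As context, a small sketch of the mixed AND/OR filtering that this change
documents (supported from 2.3.0.0), issued through the C client. The names
test, meters, current, and voltage are illustrative assumptions:

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
      if (conn == NULL) return 1;

      /* AND/OR may now combine predicates on one column and across columns,
       * provided the first-column timestamp filter still reduces to a single
       * time range. */
      const char *sql = "SELECT ts, current, voltage FROM meters "
                        "WHERE (current > 10 AND current < 20) OR voltage < 200";
      TAOS_RES *res = taos_query(conn, sql);
      if (taos_errno(res) != 0) {
        printf("query failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(conn);
      return 0;
    }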
--- documentation20/cn/12.taos-sql/docs.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 93f5e8f56c..dc20054e3c 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -724,7 +724,9 @@ Query OK, 1 row(s) in set (0.001091s) * 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持) * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) 3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 + * 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。 4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 + * 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 @@ -1458,17 +1460,19 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P - SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 -## TAOS SQL其他约定 +## TAOS SQL 其他约定 **GROUP BY的限制** -TAOS SQL支持对标签、TBNAME进行GROUP BY操作,也支持普通列进行GROUP BY,前提是:仅限一列且该列的唯一值小于10万个。 +TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进行 GROUP BY,前提是:仅限一列且该列的唯一值小于 10 万个。 -**JOIN操作的限制** +**JOIN 操作的限制** -TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。 +TAOS SQL 支持表之间按主键时间戳来 join 两张表的列,暂不支持两个表之间聚合后的四则运算。 -**IS NOT NULL与不为空的表达式适用范围** +JOIN 查询的不同表的过滤条件之间不能为 OR。 -IS NOT NULL支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 +**IS NOT NULL 与不为空的表达式适用范围** + +IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 From f506afb2697a5516edb9ae956252f9370e14b557 Mon Sep 17 00:00:00 2001 From: xiaolei li <85657333+xleili@users.noreply.github.com> Date: Wed, 1 Sep 2021 00:16:14 +0800 Subject: [PATCH 45/71] Nodejs/xiaolei/td 2979 (#7369) * nodejs/xiaolei/TD-2979-test-1970 * nodejs/xiaolei/TD-2979-test-1970 * nodejs/xiaolei/TD-2979-test-1970 * nodejs 1970 test&dry * Jenkins&test1970 * ci fix test1970 * ci fix twice * fix test1970 ci 3rd --- Jenkinsfile | 3 +- tests/examples/nodejs/test1970.js | 125 ++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 tests/examples/nodejs/test1970.js diff --git a/Jenkinsfile b/Jenkinsfile index e9ea8bafd3..deee5f473b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -234,6 +234,7 @@ pipeline { cd ${WKC}/tests/examples/nodejs npm install td2.0-connector > /dev/null 2>&1 node nodejsChecker.js host=localhost + node test1970.js ''' sh ''' cd ${WKC}/tests/examples/C#/taosdemo @@ -451,4 +452,4 @@ pipeline { ) } } -} \ No newline at end of file +} diff --git a/tests/examples/nodejs/test1970.js b/tests/examples/nodejs/test1970.js new file mode 100644 index 0000000000..5177a7371e --- /dev/null +++ b/tests/examples/nodejs/test1970.js @@ -0,0 +1,125 @@ +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var c1 = conn.cursor(); // Initializing a new cursor + +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" 
+ tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} + +function R(l, r) { + return Math.random() * (r - l) - r; +} + +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +const dbname = "nodejs_1970_db"; +const tbname = "t1"; + +let dropDB = "drop database if exists " + dbname +console.log(dropDB);//asdasdasd +c1.execute(dropDB);///asdasd + +let createDB = "create database " + dbname + " keep 36500" +console.log(createDB); +c1.execute(createDB); + +let useTbl = "use " + dbname +console.log(useTbl) +c1.execute(useTbl); + +let createTbl = "create table if not exists " + tbname + "(ts timestamp,id int)" +console.log(createTbl); +c1.execute(createTbl); + +//1969-12-31 23:59:59.999 +//1970-01-01 00:00:00.000 +//1970-01-01 07:59:59.999 +//1970-01-01 08:00:00.000a +//1628928479484 2021-08-14 08:07:59.484 +let sql1 = "insert into " + dbname + "." + tbname + " values('1969-12-31 23:59:59.999',1)" +console.log(sql1); +c1.execute(sql1); + +let sql2 = "insert into " + dbname + "." + tbname + " values('1970-01-01 00:00:00.000',2)" +console.log(sql2); +c1.execute(sql2); + +let sql3 = "insert into " + dbname + "." + tbname + " values('1970-01-01 07:59:59.999',3)" +console.log(sql3); +c1.execute(sql3); + +let sql4 = "insert into " + dbname + "." + tbname + " values('1970-01-01 08:00:00.000',4)" +console.log(sql4); +c1.execute(sql4); + +let sql5 = "insert into " + dbname + "." + tbname + " values('2021-08-14 08:07:59.484',5)" +console.log(sql5); +c1.execute(sql5); + +// Select +let query1 = "select * from " + dbname + "." + tbname +console.log(query1); +c1.execute(query1); + +var d = c1.fetchall(); +console.log(c1.fields); +for (let i = 0; i < d.length; i++) + console.log(d[i][0].valueOf()); + +//initialize +let initSql1 = "drop table if exists " + tbname +console.log(initSql1); +c1.execute(initSql1); + +console.log(createTbl); +c1.execute(createTbl); +c1.execute(useTbl) + +//-28800001 1969-12-31 23:59:59.999 +//-28800000 1970-01-01 00:00:00.000 +//-1 1970-01-01 07:59:59.999 +//0 1970-01-01 08:00:00.00 +//1628928479484 2021-08-14 08:07:59.484 +let sql11 = "insert into " + dbname + "." + tbname + " values(-28800001,11)"; +console.log(sql11); +c1.execute(sql11); + +let sql12 = "insert into " + dbname + "." + tbname + " values(-28800000,12)" +console.log(sql12); +c1.execute(sql12); + +let sql13 = "insert into " + dbname + "." + tbname + " values(-1,13)" +console.log(sql13); +c1.execute(sql13); + +let sql14 = "insert into " + dbname + "." + tbname + " values(0,14)" +console.log(sql14); +c1.execute(sql14); + +let sql15 = "insert into " + dbname + "." 
+ tbname + " values(1628928479484,15)" +console.log(sql15); +c1.execute(sql15); + +// Select +console.log(query1); +c1.execute(query1); + +var d = c1.fetchall(); +console.log(c1.fields); +for (let i = 0; i < d.length; i++) + console.log(d[i][0].valueOf()); + +setTimeout(function () { + conn.close(); +}, 2000); + From ea87f4f92bfecfd4ac2927580c0dc20314eb6977 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 31 Aug 2021 13:23:06 +0800 Subject: [PATCH 46/71] [TD-6474]: fix coredump when merging empty block --- src/common/src/tdataformat.c | 15 +++++++++----- src/tsdb/src/tsdbCommit.c | 39 ++++++++++++++++-------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index aa60803dac..61378c79c4 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -448,6 +448,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols int dcol = 0; while (dcol < pCols->numOfCols) { + bool setCol = 0; SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= schemaNCols(pSchema)) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); @@ -458,13 +459,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols STColumn *pRowCol = schemaColAt(pSchema, rcol); if (pRowCol->colId == pDataCol->colId) { void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE); + if(!isNull(value, pDataCol->type)) setCol = 1; dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); dcol++; rcol++; } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - if(forceSetNull) { + if(forceSetNull || setCol) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); } dcol++; @@ -482,6 +484,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo int nRowCols = kvRowNCols(row); while (dcol < pCols->numOfCols) { + bool setCol = 0; SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); @@ -493,13 +496,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo if (colIdx->colId == pDataCol->colId) { void *value = tdGetKvRowDataOfCol(row, colIdx->offset); + if(!isNull(value, pDataCol->type)) setCol = 1; dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); ++dcol; ++rcol; } else if (colIdx->colId < pDataCol->colId) { ++rcol; } else { - if (forceSetNull) { + if(forceSetNull || setCol) { dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); } ++dcol; @@ -518,7 +522,6 @@ void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, b } } -//TODO: refactor this function to eliminate additional memory copy int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); @@ -534,7 +537,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { - if (source->cols[j].len > 0) { + if (source->cols[j].len > 0 || target->cols[j].len > 0) { dataColAppendVal(target->cols + j, 
tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows, target->maxPoints); } @@ -578,7 +581,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i if (key1 < key2) { for (int i = 0; i < src1->numOfCols; i++) { ASSERT(target->cols[i].type == src1->cols[i].type); - if (src1->cols[i].len > 0) { + if (src1->cols[i].len > 0 || target->cols[i].len > 0) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, target->maxPoints); } @@ -596,6 +599,8 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i } else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) { dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows, target->maxPoints); + } else if(target->cols[i].len > 0) { + dataColSetNullAt(&target->cols[i], target->numOfRows); } } target->numOfRows++; diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 15fc3cc47d..0311048780 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1418,13 +1418,11 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt while (true) { key1 = (*iter >= pDataCols->numOfRows) ? INT64_MAX : dataColsKeyAt(pDataCols, *iter); - bool isRowDel = false; SMemRow row = tsdbNextIterRow(pCommitIter->pIter); if (row == NULL || memRowKey(row) > maxKey) { key2 = INT64_MAX; } else { key2 = memRowKey(row); - isRowDel = memRowDeleted(row); } if (key1 == INT64_MAX && key2 == INT64_MAX) break; @@ -1439,36 +1437,33 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt pTarget->numOfRows++; (*iter)++; } else if (key1 > key2) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendMemRowToDataCol(row, pSchema, pTarget, true); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + ASSERT(pSchema != NULL); } + tdAppendMemRowToDataCol(row, pSchema, pTarget, true); + tSkipListIterNext(pCommitIter->pIter); } else { - if (update) { - if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); - ASSERT(pSchema != NULL); - } - - tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); - } - } else { - ASSERT(!isRowDel); - + if (update != TD_ROW_OVERWRITE_UPDATE) { + //copy disk data for (int i = 0; i < pDataCols->numOfCols; i++) { //TODO: dataColAppendVal may fail dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows, pTarget->maxPoints); } - pTarget->numOfRows++; + if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++; + } + if (update != TD_ROW_DISCARD_UPDATE) { + //copy mem data + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); + ASSERT(pSchema != NULL); + } + + tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE); } (*iter)++; tSkipListIterNext(pCommitIter->pIter); From 6363169a478a1772ac788139a3a6d4efb08251e5 Mon Sep 17 00:00:00 2001 From: Shenglian Zhou Date: Wed, 1 Sep 2021 10:15:44 +0800 Subject: [PATCH 47/71] 
[TD-6145]:add test for regex filter --- src/client/src/tscSQLParser.c | 2 +- src/tsdb/src/tsdbRead.c | 2 +- src/util/src/tcompare.c | 2 +- tests/script/fullGeneralSuite.sim | 1 + tests/script/general/parser/regex.sim | 51 +++++++++++++++++++++++++++ 5 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 tests/script/general/parser/regex.sim diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index fea02900fb..3f3a3b0150 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4525,7 +4525,7 @@ static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_ char regErrBuf[256] = {0}; const char* pattern = pRight->value.pz; - int cflags = REG_EXTENDED | REG_ICASE; + int cflags = REG_EXTENDED; if ((errCode = regcomp(®ex, pattern, cflags)) != 0) { regerror(errCode, ®ex, regErrBuf, sizeof(regErrBuf)); tscError("Failed to compile regex pattern %s. reason %s", pattern, regErrBuf); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 60ecb238ca..c1b935e0ee 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -4236,7 +4236,7 @@ void getTableListfromSkipList(tExprNode *pExpr, SSkipList *pSkipList, SArray *re param->setupInfoFn(pExpr, param->pExtInfo); tQueryInfo *pQueryInfo = pExpr->_node.info; - if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr == TSDB_RELATION_MATCH + if (pQueryInfo->indexed && (pQueryInfo->optr != TSDB_RELATION_LIKE && pQueryInfo->optr != TSDB_RELATION_MATCH && pQueryInfo->optr != TSDB_RELATION_IN)) { queryIndexedColumn(pSkipList, pQueryInfo, result); } else { diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e906eb8423..4c76724921 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -365,7 +365,7 @@ int32_t compareStrRegexComp(const void* pLeft, const void* pRight) { regex_t regex; char msgbuf[256] = {0}; - int cflags = REG_EXTENDED | REG_ICASE; + int cflags = REG_EXTENDED; if ((errCode = regcomp(®ex, pattern, cflags)) != 0) { regerror(errCode, ®ex, msgbuf, sizeof(msgbuf)); uError("Failed to compile regex pattern %s. 
reason %s", pattern, msgbuf); diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index 9f46b07847..188ce14055 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -222,3 +222,4 @@ run general/stream/metrics_replica1_vnoden.sim run general/db/show_create_db.sim run general/db/show_create_table.sim run general/parser/like.sim +run general/parser/regex.sim diff --git a/tests/script/general/parser/regex.sim b/tests/script/general/parser/regex.sim new file mode 100644 index 0000000000..59c7384d6c --- /dev/null +++ b/tests/script/general/parser/regex.sim @@ -0,0 +1,51 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect + +$db = testdb +sql drop database if exists $db +sql create database $db +sql use $db + +print ======================== regular expression match test +$st_name = st +$ct1_name = ct1 +$ct2_name = ct2 + +sql create table $st_name (ts timestamp, c1b binary(20)) tags(t1b binary(20)); +sql create table $ct1_name using $st_name tags('taosdata1') +sql create table $ct2_name using $st_name tags('taosdata2') +sql create table not_match using $st_name tags('NOTMATCH') + +sql select tbname from $st_name where tbname match '^ct[[:digit:]]' + +if $rows != 2 then + return -1 +endi + +sql select tbname from $st_name where t1b match '[[:lower:]]+' +if $rows != 2 then + return -1 +endi + +sql insert into $ct1_name values(now, 'this is engine') +sql insert into $ct2_name values(now, 'this is app egnine') + +sql select c1b from $st_name where c1b match 'engine' +if $data00 != @this is engine@ then + return -1 +endi + +if $rows != 1 then + return -1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + + From a4604990a6c4f3ee703f908bc6ac1b91637470a5 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 1 Sep 2021 11:00:17 +0800 Subject: [PATCH 48/71] [TD-6256] add subscribe case for go --- tests/gotest/batchtest.sh | 2 +- tests/gotest/case001/case001.go | 11 +++-- tests/gotest/case002/case002.bat | 9 ++++ tests/gotest/case002/case002.go | 81 ++++++++++++++++++++++++++++++++ tests/gotest/case002/case002.sh | 22 +++++++++ 5 files changed, 120 insertions(+), 5 deletions(-) create mode 100644 tests/gotest/case002/case002.bat create mode 100644 tests/gotest/case002/case002.go create mode 100644 tests/gotest/case002/case002.sh diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh index 0fbbf40714..8f5a7fe8f0 100755 --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -17,5 +17,5 @@ go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.io,direct bash ./case001/case001.sh $severIp $serverPort -#bash ./case002/case002.sh $severIp $serverPort +bash ./case002/case002.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go index 9e912aab99..9d35888f31 100644 --- a/tests/gotest/case001/case001.go +++ b/tests/gotest/case001/case001.go @@ -19,7 +19,7 @@ import ( "database/sql" "flag" "fmt" - _ "github.com/taosdata/driver-go/taosSql" + _ "github.com/taosdata/driver-go/v2/taosSql" "log" "strconv" "time" @@ -63,6 +63,7 @@ func main() { url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" // open connect to taos server + fmt.Printf("url:%s",url) db, err := sql.Open(taosDriverName, url) if err != nil { 
log.Fatalf("Open database error: %s\n", err) @@ -168,17 +169,18 @@ func insert_data(db *sql.DB, demot string) { func select_data(db *sql.DB, demot string) { st := time.Now().Nanosecond() - + fmt.Println(demot) rows, err := db.Query("select * from ? ", demot) // go text mode + fmt.Println("end query",err) checkErr(err, "select db.Query") fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ", "ts", " ", "id", " ", "name", " ", "len", " ", "flag", " ", "notes", " ", "fv", " ", " ", "dv") var affectd int //decoder := mahonia.NewDecoder("gbk") // 把原来ANSI格式的文本文件里的字符,用gbk进行解码。 - + fmt.Println("start next") for rows.Next() { - var ts string + var ts time.Time var name string var id int var len int8 @@ -188,6 +190,7 @@ func select_data(db *sql.DB, demot string) { var dv float64 err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) + fmt.Println("rows:",err) checkErr(err, "select rows.Scan") fmt.Printf("%s|\t", ts) diff --git a/tests/gotest/case002/case002.bat b/tests/gotest/case002/case002.bat new file mode 100644 index 0000000000..ebec576e72 --- /dev/null +++ b/tests/gotest/case002/case002.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run cases001.go + +del go.* +go mod init demotest +go build +demotest.exe -h %1 -p %2 +cd .. + diff --git a/tests/gotest/case002/case002.go b/tests/gotest/case002/case002.go new file mode 100644 index 0000000000..c69da04cb2 --- /dev/null +++ b/tests/gotest/case002/case002.go @@ -0,0 +1,81 @@ +package main + +import ( + "database/sql/driver" + "fmt" + "io" + "os" + "time" + + taos "github.com/taosdata/driver-go/v2/af" +) + +func Subscribe_check(topic taos.Subscriber, check int) bool { + count := 0 + rows, err := topic.Consume() + defer func() { rows.Close(); time.Sleep(time.Second) }() + if err != nil { + fmt.Println(err) + os.Exit(3) + } + for { + values := make([]driver.Value, 2) + err := rows.Next(values) + if err == io.EOF { + break + } else if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(4) + } + count++ + } + if count == check { + return false + } else { + return true + } +} +func main() { + ts := 1630461600000 + db, err := taos.Open("127.0.0.1", "", "", "", 0) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer db.Close() + db.Exec("drop if exists database test") + db.Exec("create if not exists database test") + db.Exec("use test") + db.Exec("drop if exists database test") + db.Exec("create table test (ts timestamp ,level int)") + for i := 0; i < 10; i++ { + sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i) + db.Exec(sqlcmd) + } + + fmt.Println("consumption 01.") + topic, err := db.Subscribe(false, "test", "select ts, level from test", time.Second) + if Subscribe_check(topic, 10) { + os.Exit(3) + } + + fmt.Println("consumption 02: no new rows inserted") + if Subscribe_check(topic, 0) { + os.Exit(3) + } + + fmt.Println("consumption 03: after one new rows inserted") + sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+10, 10) + db.Exec(sqlcmd) + if Subscribe_check(topic, 1) { + os.Exit(3) + } + + fmt.Println("consumption 04: keep progress and continue previous subscription") + topic.Unsubscribe(true) + topic, err = db.Subscribe(false, "test", "select ts, level from test", time.Second) + if Subscribe_check(topic, 0) { + os.Exit(3) + } + +} diff --git a/tests/gotest/case002/case002.sh b/tests/gotest/case002/case002.sh new file mode 100644 index 0000000000..94e5bb44e0 --- /dev/null +++ b/tests/gotest/case002/case002.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "==== start run cases001.go" 
+
+set +e
+#set -x
+
+script_dir="$(dirname $(readlink -f $0))"
+#echo "pwd: $script_dir, para0: $0"
+
+#execName=$0
+#execName=`echo ${execName##*/}`
+#goName=`echo ${execName%.*}`
+
+###### step 3: start build
+cd $script_dir
+rm -f go.*
+go mod init demotest > /dev/null 2>&1
+go mod tidy > /dev/null 2>&1
+go build > /dev/null 2>&1
+sleep 1s
+./demotest -h $1 -p $2
From 08f3d82a6a4fb23c5b67bd9386e2e4ca16cda9f5 Mon Sep 17 00:00:00 2001
From: Tsahi Edri
Date: Wed, 1 Sep 2021 20:40:24 +0300
Subject: [PATCH 49/71] Update docs.md from the beginning to line 250

---
 documentation20/en/03.architecture/docs.md | 46 +++++++++++-----------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
index ac6c94fe40..87412705cc 100644
--- a/documentation20/en/03.architecture/docs.md
+++ b/documentation20/en/03.architecture/docs.md
@@ -4,7 +4,7 @@
 
 ### A Typical IoT Scenario
 
-In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the collection devices of the same type, there are often many specific collection devices distributed in places. BigData processing system aims to collect all kinds of data, and then calculate and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
+In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for collection devices of the same type, there are often many such devices distributed in various places. A big data processing system aims to collect all kinds of data, then compute and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
@@ -127,25 +127,25 @@ Since time-series data is most likely to be structured data, TDengine adopts the
 
 ### One Table for One Collection Point
 
-To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 millions smart meters, means 10 millions tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
+To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
 
 1. Guarantee that all data from a collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one point in a time range, this design will reduce the random read latency significantly, thus increasing read and query speed by orders of magnitude.
 2. Since the data generation process of each collection device is completely independent, each device has its own unique data source, so writes can be carried out in a lock-free manner to greatly improve the speed.
 3. Write latency can be significantly reduced too, as the data points generated by the same device arrive in time order, so a new data point is simply appended to a block.
 
-If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to continuously stored together. **The method of one table for each data collection point can ensure the optimal performance of insertion and query of a single data collection point to the greatest extent.**
+If the data of multiple devices are written into one table in the traditional way, then due to the uncontrollable network delay, the order in which data from different devices arrive at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be stored together continuously. **The method of one table for each data collection point can ensure the optimal performance of insertion and query of a single data collection point to the greatest extent.**
 
 TDengine suggests using collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
 
 ### STable: A Collection of Data Points in the Same Type
 
-The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine.
+The method of one table for each point will bring a greatly increased number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine.
 
 STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
 
 In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**
 
-When aggregating multiple data collection points with the same data type, TDEngine will first find out the tables that meet the tag filters from the STables, and then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of aggregation calculation.
+When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filters from the STables, then scan the time-series data of these tables to perform the aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of aggregation calculation.
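To make the STable-as-template idea concrete, here is a minimal sketch of the smart-meter example written against the Go `database/sql` interface of the v2 `taosSql` driver that appears later in this series. The database name `power`, the table and tag names, and the DSN layout are illustrative assumptions, not something the patch itself defines:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/taosdata/driver-go/v2/taosSql"
)

func main() {
	// DSN layout assumed here: user:password@/tcp(host:port)/
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:6030)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stmts := []string{
		"create database if not exists power",
		// One STable as the template for all smart meters of this type ...
		"create table if not exists power.meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupid int)",
		// ... and one sub-table per collection point, created from the template.
		"create table if not exists power.d1001 using power.meters tags ('Beijing.Chaoyang', 2)",
		"insert into power.d1001 values (now, 10.3, 219, 0.31)",
	}
	for _, s := range stmts {
		if _, err := db.Exec(s); err != nil {
			log.Fatalf("%s: %v", s, err)
		}
	}

	// Aggregation over the STable: the tag filter first narrows the member
	// tables, then only their data blocks are scanned.
	var avg float64
	if err := db.QueryRow("select avg(current) from power.meters where groupid = 2").Scan(&avg); err != nil {
		log.Fatal(err)
	}
	log.Printf("avg current: %f", avg)
}
```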
+**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, virtual machine, or Docker container installed with OS. The physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you don't know about FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)". **Data node (dnode):** A dnode is a running instance of the TDengine server-side execution code taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (VNODE), zero or at most one logical management node (mnode). The unique identification of a dnode in the system is determined by the instance's End Point (EP). EP is a combination of FQDN (Fully Qualified Domain Name) of the physical node where the dnode is located and the network port number (Port) configured by the system. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances or have multiple data nodes. -**Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node. +**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). 
At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is used to manage between mnodes, and the data synchronization is carried out in a strong consistent way. Any data update operation can only be done on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.

+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). A master/slave mechanism is used among the mnodes, and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be done on the master. The creation of the mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction.

-**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be done without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
+**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating a DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, and the management node assigns a system-unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.

-**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also need to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.

+**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interface interaction between application and cluster, and provides the native interface of the C/C++ language, which is embedded in the JDBC, C#, Python, Go and Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For the JDBC, C/C++, C#, Python, Go and Node.js interfaces, this module runs on the physical node where the application is located. At the same time, to support the fully distributed RESTful interface, taosc has a running instance on each dnode of the TDengine cluster.

### Node Communication
-**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.

+**Communication mode**: The communication among the data nodes of the TDengine system, and between the application driver and each data node, is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission over UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for packets of more than 15K or for query operations. At the same time, TDengine will automatically compress/decompress the data and digitally sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.

**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because an IP address is variable, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the normal operation of the DNS service, or to configure hosts files on the nodes and on the nodes where applications are located.

@@ -184,13 +184,13 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc

**Cluster external connection**: A TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through the CLI, the FQDN of the data node can be specified through the option -h, and the configured port number can be specified through -p. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted.
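The same End Point information (FQDN plus port) is what a client-side connector needs. Below is a minimal sketch using the Go `af` package from the test cases earlier in this series; the host name, credentials and port are placeholder assumptions:

```go
package main

import (
	"fmt"
	"os"

	taos "github.com/taosdata/driver-go/v2/af"
)

func main() {
	// Same Open(host, user, password, db, port) signature as in case002.go
	// above; connecting to one known dnode is enough, since the cluster can
	// redirect the client from there.
	conn, err := taos.Open("h1.taosdata.com", "root", "taosdata", "", 6030)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer conn.Close()
	fmt.Println("connected to h1.taosdata.com:6030")
}
```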
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2: Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3: Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connected. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.

+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened normally to obtain EP information of the mnode, go to the second step. 2. Check the system configuration file taos.cfg to obtain the node configuration parameters firstEp and secondEp (the nodes specified by these two parameters can be normal nodes without a mnode; in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist in taos.cfg, or are invalid, go to the third step. 3. Set its own EP as a mnode EP and run independently. After obtaining the mnode EP list, the data node initiates the connection. Once connected, it will successfully join the working cluster. If not successful, it will try the next item in the mnode EP list. If all attempts are made but the connection still fails, it sleeps for a few seconds before trying again.

-**The choice of MNODE**: TDengine logically has a management node, but there is no separated execution code. The server side only has a set of execution code taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, while totally transparent without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.

+**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code; the server side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If its own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. The modification of configuration parameters is a decision made by the mnode itself according to resource usage.
**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster. Step 1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"; Step 2: In the system configuration parameter file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EP of any two data nodes in the existing cluster. Please refer to the detailed user tutorial for detailed steps. In this way, the cluster will be established step by step.

-**Redirection**: No matter about dnode or taosc, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or taosc, if it’s not an mnode by self, it will reply the mnode EP List back. After receiving this list, taosc or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes.

+**Redirection**: Whether it is a dnode or taosc, the connection to the mnode shall be initiated first; but the mnode is automatically created and maintained by the system, so the user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when it receives a connection request from a newly started dnode or taosc and it is not a mnode itself, it will reply with the mnode EP List. After receiving this list, taosc or the newly started dnode will try to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes.

### A Typical Messaging Process

@@ -206,9 +206,9 @@ To explain the relationship between vnode, mnode, taosc and application and thei

5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will judge the node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in the vgroup.
6. Taosc notifies APP that writing is successful.

-For Step 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the externally serving End Point of the configured cluster. If the dnode that received the request does not have an mnode configured, it will inform the mnode EP list in a reply message, so that taosc will re-issue a request to obtain meta-data to the EP of another new mnode.
+For Step 2 and 3, when taosc starts, it does not know the End Point of the mnode, so it will directly initiate a request to the externally serving End Point of the configured cluster. If the dnode that received the request does not have a mnode configured, it will reply with the mnode EP list, so that taosc re-issues the meta-data request to the EP of a new mnode.

-For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so assumes that the first vnodeID is the master and send a request to it. If the requested vnode is not the master, it will reply the actual master as a new target taosc makes a request to. Once the reply of successful insertion is obtained, taosc will cache the information of master node.

+For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so it assumes that the first vnodeID is the master and sends the request to it. If the requested vnode is not the master, it will reply with the actual master, and taosc then re-sends the request to that vnode. Once the reply of successful insertion is obtained, taosc will cache the information of the master node.

The above is the process of inserting data, and the processes of querying and calculating are completely consistent. Taosc encapsulates and shields all these complicated processes, so the application does not perceive them and needs no special handling.

@@ -220,26 +220,26 @@ Through taosc caching mechanism, mnode needs to be accessed only when a table is

The data stored by TDengine include collected time-series data, metadata related to libraries and tables, tag data, etc. These data are specifically divided into three parts:

-- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when update parameter is set to 1. By adopting the model with one table for each collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple add operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single collection point with best performance.
+- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when update parameter is set to 1. By adopting the model with one table for each collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple add operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single collection point with the best performance.
- Tag data: meta files stored in vnode support four standard operations of add, delete, modify and query. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. If there are many tag filtering operations, queries will be very frequent and TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in the face of millions of tables, the filtering results will return in milliseconds.
-- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of add, delete, modify and query are supported. The amount of these data are not large and can be stored in memory, moreover the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
+- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of add, delete, modify and query are supported. The amount of such data is not large and can be stored in memory; moreover, the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, and yet there is no performance bottleneck.

Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:

-- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicates, so wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite again, which is extremely expensive to operate.
-- Realize extremely efficient aggregation query between multiple tables: when doing aggregation query between multiple tables, it firstly finds out the tag filtered tables, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
+- Greatly reduce the redundancy of tag data storage: a general NoSQL database or time-series database adopts K-V storage, in which the Key includes a timestamp, a device ID and various tags. Each record carries these duplicates, thus wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite it again, which is extremely expensive to operate.
+- Realize extremely efficient aggregation query between multiple tables: when doing an aggregation query between multiple tables, it first finds out the tag-filtered tables, and then finds the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.

### Data Sharding

For large-scale data management, to achieve scale-out, it is generally necessary to adopt a partitioning strategy such as sharding. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.

-VNode (Virtual Data Node) is responsible for providing writing, query and calculation functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and completely transparent to the application.
+VNode (Virtual Data Node) is responsible for providing writing, query and calculation functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.

For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.

When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table there. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.

-The meda data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is convenient for efficient and parallel tag filtering operations.
+The meta data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is convenient for efficient and parallel tag filtering operations.

### Data Partitioning

@@ -249,7 +249,7 @@ In general, **TDengine splits big data by vnode and time as two dimensions**, wh

### Load Balancing

-Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) for declaring the status of the entire cluster. Based on the overall state, when an mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. In the process, external services keep running and the data insertion, query and calculation operations are not affected.
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) for declaring the status of the entire cluster. Based on the overall state, when a mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. In the process, external services keep running and the data insertion, query and calculation operations are not affected.
If the mnode has not received the dnode status for a period of time, the dnode will be judged as offline. When offline lasts a certain period of time (the duration is determined by the configuration parameter ‘offlineThreshold’), the dnode will be forcibly removed from the cluster by the mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.

From 6ae704363f0eead2542ece14952a6ac8c32dc4d0 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 2 Sep 2021 10:52:21 +0800
Subject: [PATCH 50/71] [TD-2639] : improve doc format slightly.

---
 documentation20/cn/02.getting-started/docs.md | 2 +-
 documentation20/cn/08.connector/docs.md       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md
index dd7c20fe18..a37afa9212 100644
--- a/documentation20/cn/02.getting-started/docs.md
+++ b/documentation20/cn/02.getting-started/docs.md
@@ -208,7 +208,7 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
 | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |
 
-注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
+注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
 
 请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。
 
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 0fc05cf331..4dc56e61da 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -17,7 +17,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、
 | **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- |
 | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ |
 
-其中 ● 表示官方测试验证通过, ○ 表示非官方测试验证通过,--表示未经验证。
+其中 ● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
 
 注意:
From 8594afdbcc9c75150c438d3431380a195750f41d Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 2 Sep 2021 11:29:36 +0800
Subject: [PATCH 51/71] [TD-6133] : update installation url of Go connector to be latest version.

---
 documentation20/cn/08.connector/docs.md |  7 +++++--
 documentation20/en/08.connector/docs.md | 12 ++++++------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 4dc56e61da..45395c6f45 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -984,15 +984,18 @@ go build
 
 ### Go连接器的使用
 
-TDengine提供了GO驱动程序包`taosSql`.`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。
+TDengine提供了GO驱动程序包`taosSql`。`taosSql`实现了GO语言的内置接口`database/sql/driver`。用户只需按如下方式引入包就可以在应用程序中访问TDengine。
 
 ```go
 import (
     "database/sql"
     _ "github.com/taosdata/driver-go/v2/taosSql"
 )
 ```
+
 **提示**:下划线与双引号之间必须有一个空格。
 
+`taosSql` 的 v2 版本进行了重构,分离出内置数据库操作接口 `database/sql/driver` 到目录 `taosSql`;订阅、stmt等其他功能放到目录 `af`。
+
 ### 常用API
 
 - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index 9cbd395206..e8087b79db 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -179,7 +179,7 @@ Clean up the running environment and call this API before the application exits.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`

-Set client options, currently only time zone setting (_TSDB_OPTIONTIMEZONE) and encoding setting (_TSDB_OPTIONLOCALE) are supported. The time zone and encoding default to the current operating system settings.
+Set client options; currently only the time zone setting (`_TSDB_OPTIONTIMEZONE`) and the encoding setting (`_TSDB_OPTIONLOCALE`) are supported. The time zone and encoding default to the current operating system settings.

- `char *taos_get_client_info()`

@@ -296,9 +296,7 @@ Asynchronous APIs have relatively high requirements for users, who can selective

The asynchronous APIs of TDengine all use non-blocking calling mode. Applications can use multithreading to open multiple tables at the same time, and can query or insert to each open table at the same time. It should be pointed out that the **application client must ensure that the operation on the same table is completely serialized**, that is, when the insertion or query operation on the same table is not completed (when no result has been returned), the second insertion or query operation cannot be performed.

-
-
### Parameter binding API

In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows:

@@ -823,12 +821,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html

TDengine provides the GO driver taosSql. taosSql implements the GO language's built-in interface database/sql/driver. Users can access TDengine in the application by simply importing the package as follows, see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details.

-Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html).
+Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go .

```Go
import (
    "database/sql"
-    _ "github.com/taosdata/driver-go/taosSql"
+    _ "github.com/taosdata/driver-go/v2/taosSql"
)
```

go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
```

+`taosSql` v2 is a refactoring of the v1 version: the built-in database operation interface `database/sql/driver` is kept under the `taosSql` directory, while other advanced functions such as subscription and stmt are moved into the `af` directory.
+
### Common APIs

- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`

@@ -937,7 +937,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh

Steps:

-1. Create a new installation verification directory, for example: ~/tdengine-test, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).
+1. Create a new installation verification directory, for example: `~/tdengine-test`, and copy the nodejsChecker.js source program from GitHub. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js).

2. Execute the following command:
From 21dfbae7fdb734a507b46aa6662fb1b6b26dfd6a Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 2 Sep 2021 13:36:19 +0800
Subject: [PATCH 52/71] [TS-212] : LAST() & LAST_ROW() choose random result when multi rows having same timestamp.
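As a hedged illustration of the behavior this commit documents: when several rows (for example, rows from different sub-tables of one super table) share the same maximal timestamp, LAST_ROW() may return any one of them, and repeated runs need not pick the same row. The sketch below reuses the assumed `power.meters` super table from the earlier example:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/taosdata/driver-go/v2/taosSql"
)

func main() {
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:6030)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If two sub-tables of power.meters both hold a row at the identical
	// newest timestamp, either row's value may come back here, and a second
	// run of the same query may return the other one.
	var current float64
	if err := db.QueryRow("select last_row(current) from power.meters").Scan(&current); err != nil {
		log.Fatal(err)
	}
	fmt.Println("last_row(current):", current)
}
```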
--- documentation20/cn/12.taos-sql/docs.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index fb6ccb6878..4c9765b276 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1064,7 +1064,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` - 功能说明:统计表/超级表中某列的值最后写入的非NULL值。 + 功能说明:统计表/超级表中某列的值最后写入的非 NULL 值。 返回结果数据类型:同应用的字段。 @@ -1074,9 +1074,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 说明: - 1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*); + 1)如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*); - 2)如果结果集中的某列全部为NULL值,则该列的返回结果也是NULL;如果结果集中所有列全部为NULL值,则不返回结果。 + 2)如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。 + + 3)在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 示例: ```mysql @@ -1225,7 +1227,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 限制:LAST_ROW()不能与INTERVAL一起使用。 + 限制:LAST_ROW() 不能与 INTERVAL 一起使用。 + + 说明:在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。 示例: ```mysql From 5ab1c3abad785cc9d2b06b3b4e5b58b284ec9bc3 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 2 Sep 2021 15:47:26 +0800 Subject: [PATCH 53/71] [TD-6445]: add interp testcases not covered --- tests/pytest/functions/function_interp.py | 77 ++++++++++++++--------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index 87d001d9e5..469e9186f6 100644 --- a/tests/pytest/functions/function_interp.py +++ b/tests/pytest/functions/function_interp.py @@ -11,45 +11,43 @@ # -*- coding: utf-8 -*- -import sys -import taos from util.log import * from util.cases import * from util.sql import * -import numpy as np - - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) - self.rowNum = 10 - self.ts = 1537146000000 - def run(self): tdSql.prepare() tdSql.execute("create table ap1 (ts timestamp, pav float)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") - tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") - tdSql.execute("insert 
into ap1 values ('2021-07-25 02:19:59.742', 4.59857)")
-        tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)")
+        tdSql.execute("create table ap2 (ts timestamp, pav float) tags (t1 float)")
+        tdSql.execute("create table ap2_sub1 using ap2 tags (2.90799)")
+        tdSql.execute("create table ap2_sub2 using ap2 tags (2.90799)")
+        tdSql.execute("create table ap3 (ts timestamp, pav float) tags (t1 float)")
+        tdSql.execute("create table ap3_sub1 using ap3 tags (2.90799)")
+        for tb_name in ["ap1", "ap2_sub1", "ap3_sub1"]:
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.119', 2.90799)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.317', 3.07399)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.517', 0.58117)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.717', 0.16150)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:54.918', 1.47885)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:56.569', 1.76472)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.381', 2.13722)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.574', 4.10256)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.776', 3.55345)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:57.976', 1.46624)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.187', 0.17943)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.372', 2.04101)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.573', 3.20924)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.768', 1.71807)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:58.964', 4.60900)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.155', 4.33907)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.359', 0.76940)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.553', 0.06458)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.742', 4.59857)")
+            tdSql.execute(f"insert into {tb_name} values ('2021-07-25 02:19:59.938', 1.55081)")
        tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)")
        tdSql.checkRows(0)
@@ -57,6 +55,29 @@ class TDTestCase:
        tdSql.checkRows(0)
        tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)")
        tdSql.checkRows(0)
+        # check None
+        tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (None)")
+        tdSql.checkRows(0)
+        # check NULL
+        tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (NULL)")
+        tdSql.checkRows(6)
+        for i in range(5):
+            tdSql.checkData(i,1,None)
+        # check VALUE
+        tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (VALUE, 1)")
+        tdSql.checkRows(6)
+        for i in range(5):
+            tdSql.checkData(i,1,1.00000)
+        # check tag group by
+        tdSql.query("select interp(pav) from ap2 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (VALUE,1) group by t1;")
+        for i in range(5):
+            tdSql.checkData(i,1,1.00000)
+            tdSql.checkData(i,2,2.90799)
+        # check multi ts lines
+        tdSql.query("select z1.ts,z1.val1,z2.val2 from (select interp(pav) val1 from ap2 where ts> '2021-07-25 
02:20:00' every(1000a) FILL (value,1)) z1,(select interp(pav) val2 from ap3 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (value,2)) z2 where z1.ts=z2.ts ;") + for i in range(5): + tdSql.checkData(i,1,1.00000) + tdSql.checkData(i,2,2.00000) tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (LINEAR)") tdSql.checkRows(6) tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' every(1000a) FILL (NEXT)") From 78c46a2739b768f9484467f48e3b1fda981046c7 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 2 Sep 2021 16:23:03 +0800 Subject: [PATCH 54/71] update C# test case --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index deee5f473b..91855a92fb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -239,7 +239,7 @@ pipeline { sh ''' cd ${WKC}/tests/examples/C#/taosdemo mcs -out:taosdemo *.cs > /dev/null 2>&1 - echo '' |./taosdemo + echo '' |./taosdemo -c /etc/taos ''' sh ''' cd ${WKC}/tests/gotest From f842633b17ff3aca5d81ba35f7716d510c9b4aa7 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 3 Sep 2021 10:18:58 +0800 Subject: [PATCH 55/71] tagregex: fix test error --- tests/script/general/parser/regex.sim | 11 +++++++++++ tests/script/general/parser/where.sim | 10 +++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/script/general/parser/regex.sim b/tests/script/general/parser/regex.sim index 59c7384d6c..5351d914f3 100644 --- a/tests/script/general/parser/regex.sim +++ b/tests/script/general/parser/regex.sim @@ -22,12 +22,23 @@ sql create table $ct1_name using $st_name tags('taosdata1') sql create table $ct2_name using $st_name tags('taosdata2') sql create table not_match using $st_name tags('NOTMATCH') +sql select tbname from $st_name where tbname match '.*' +if $rows != 3 then + return -1 +endi + + sql select tbname from $st_name where tbname match '^ct[[:digit:]]' if $rows != 2 then return -1 endi +sql select tbname from $st_name where tbname match '.*' +if $rows !=3 then + return -1 +endi + sql select tbname from $st_name where t1b match '[[:lower:]]+' if $rows != 2 then return -1 diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 0f9e317b40..6b789de490 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -95,15 +95,15 @@ if $rows != 2 then return -1 endi -print $tbPrefix, $data00 $data10 +print $tbPrefix $tb = $tbPrefix . 0 -if $data00 != $tb then - print expect $tb, actual:$data00 +if $data00 != wh_tb1 then + print expect wh_tb1, actual:$data00 return -1 endi $tb = $tbPrefix . 
1
-if $data10 != $tb then
-  print expect $tb, actual:$data00
+if $data10 != wh_tb0 then
+  print expect wh_tb0, actual:$data10
   return -1
 endi

From e549596862a01149b902c4ef087dbf3556e10bce Mon Sep 17 00:00:00 2001
From: jtao1735
Date: Sun, 5 Sep 2021 01:51:00 +0000
Subject: [PATCH 56/71] correct some English writing errors

---
 documentation20/en/00.index/docs.md           |  99 ++++++------
 documentation20/en/01.evaluation/docs.md      |  10 +-
 documentation20/en/02.getting-started/docs.md |  14 +-
 documentation20/en/03.architecture/docs.md    | 141 +++++++++---------
 documentation20/en/04.model/docs.md           |  26 ++--
 documentation20/en/05.insert/docs.md          |  30 ++--
 documentation20/en/06.queries/docs.md         |   6 +-
 .../en/07.advanced-features/docs.md           |  12 +-
 documentation20/en/08.connector/docs.md       |  12 +-
 documentation20/en/09.connections/docs.md     |   2 +-
 documentation20/en/10.cluster/docs.md         |  16 +-
 documentation20/en/11.administrator/docs.md   |  22 +--
 documentation20/en/12.taos-sql/docs.md        |  65 ++++----
 13 files changed, 219 insertions(+), 236 deletions(-)

diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md
index 0ac4a06ef4..05c16aa3eb 100644
--- a/documentation20/en/00.index/docs.md
+++ b/documentation20/en/00.index/docs.md
@@ -1,37 +1,50 @@
 # TDengine Documentation

-TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.
+TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, IT Infrastructure and Application Monitoring, etc. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Modeling sections. In addition to this document, you should also download and read the technology white paper. For the older TDengine version 1.6 documentation, please click [here](https://www.taosdata.com/en/documentation16/).
## [TDengine Introduction](/evaluation)

* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenes](/evaluation#scenes)
-* [TDengine Performance Metrics and Verification](/evaluation#)
+* [TDengine Performance Metrics and Verification](/evaluation#)

## [Getting Started](/getting-started)

-* [Quickly Install](/getting-started#install): install via source code/package / Docker within seconds
-
-- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
-- [Command-line](/getting-started#console) : an easy way to access TDengine server
-- [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
-- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
-- [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html):a detailed guide for TDengine deployment in Kubernetes environment
+* [Quick Install](/getting-started#install): install via source code/package / Docker within seconds
+* [Quick Launch](/getting-started#start): start / stop TDengine quickly with systemctl
+* [Command-line](/getting-started#console): an easy way to access TDengine server
+* [Experience Lightning Speed](/getting-started#demo): running a demo, inserting/querying data to experience faster speed
+* [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by TDengine server and client
+* [Deploy to Kubernetes](https://taosdata.github.io/TDengine-Operator/en/index.html): a detailed guide for TDengine deployment in Kubernetes environment

## [Overall Architecture](/architecture)

-- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
-- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL, support scale-out and high-reliability
-- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data will be separated from time-series data, segmented by vnode and time
-- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement is sent back to client, while supporting multi-replicas
+- [Data Model](/architecture#model): relational database model, but one table for one data collection point with static tags
+- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL architecture, highly available and horizontally scalable
+- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time
+- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replication.
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
-- [Data Query](/architecture#query): support various functions, time-axis aggregation, interpolation, and multi-table aggregation
+- [Data Query](/architecture#query): support various SQL functions, downsampling, interpolation, and multi-table aggregation

## [Data Modeling](/model)

-- [Create a Database](/model#create-db): create a database for all data collection points with similar features
+- [Create a Database](/model#create-db): create a database for all data collection points with similar data characteristics
- [Create a Super Table(STable)](/model#create-stable): create a STable for all data collection points with the same type
-- [Create a Table](/model#create-table): use STable as the template, to create a table for each data collecting point
+- [Create a Table](/model#create-table): use STable as the template to create a table for each data collection point
+
+## [Efficient Data Ingestion](/insert)
+
+- [Data Writing via SQL](/insert#sql): write one or multiple records into one or multiple tables via SQL insert command
+- [Data Writing via Prometheus](/insert#prometheus): Configure Prometheus to write data directly without any code
+- [Data Writing via Telegraf](/insert#telegraf): Configure Telegraf to write collected data directly without any code
+- [Data Writing via EMQ X](/insert#emq): Configure EMQ X to write MQTT data directly without any code
+- [Data Writing via HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code
+
+## [Efficient Data Querying](/queries)
+
+- [Major Features](/queries#queries): support various standard query functions, setting filter conditions, and querying per time segment
+- [Multi-table Aggregation](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation
+- [Downsampling](/queries#sampling): aggregate data in successive time windows, support interpolation

## [TAOS SQL](/taos-sql)

- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
-- [Inserting Records](/taos-sql#insert): support to write single/multiple items per table, multiple items across tables, and support to write historical data
+- [Inserting Records](/taos-sql#insert): write single/multiple records into a table, multiple records across tables, and historical data
- [Data Query](/taos-sql#select): support time segment, value filtering, sorting, manual paging of query results, etc
- [SQL Function](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc
-- [Time Dimensions Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
+- [Cutting and Aggregation](/taos-sql#aggregation): aggregate and reduce the dimension after cutting table data by time segment
- [Boundary Restrictions](/taos-sql#limitation): restrictions for the library, table, SQL, and others
- [Error Code](/taos-sql/error-code): TDengine 2.0 error codes and corresponding decimal codes

-## [Efficient Data Ingestion](/insert)
-
-- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple
tables via SQL insert command -- [Prometheus Ingestion](/insert#prometheus): Configure Prometheus to write data directly without any code -- [Telegraf Ingestion](/insert#telegraf): Configure Telegraf to write collected data directly without any code -- [EMQ X Broker](/insert#emq): Configure EMQ X to write MQTT data directly without any code -- [HiveMQ Broker](/insert#hivemq): Configure HiveMQ to write MQTT data directly without any code - -## [Efficient Data Querying](/queries) - -- [Main Query Features](/queries#queries): support various standard functions, setting filter conditions, and querying per time segment -- [Multi-table Aggregation Query](/queries#aggregation): use STable and set tag filter conditions to perform efficient aggregation queries -- [Downsampling to Query Value](/queries#sampling): aggregate data in successive time windows, support interpolation - ## [Advanced Features](/advanced-features) - [Continuous Query](/advanced-features#continuous-query): Based on sliding windows, the data stream is automatically queried and calculated at regular intervals @@ -71,7 +70,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Connector](/connector) - [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library -- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API +- [Java Connector(JDBC)](https://www.taosdata.com/en/documentation20/connector/java): driver for connecting to the server from Java applications using the JDBC API - [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications - [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP - [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications @@ -88,12 +87,12 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Installation and Management of TDengine Cluster](/cluster) -- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage -- [Create Your First Node](/cluster#node-one): simple to follow the quick setup +- [Preparation](/cluster#prepare): important steps before deploying TDengine for production usage +- [Create the First Node](/cluster#node-one): just follow the steps in quick start - [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster - [Node Management](/cluster#management): add, delete, and check nodes in the cluster -- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through multi-replicas -- [Mnode Management](/cluster#mnode): automatic system creation without any manual intervention +- [High-availability of Vnode](/cluster#high-availability): implement high-availability of Vnode through replicas +- [Mnode Management](/cluster#mnode): mnodes are created automatically without any manual intervention - [Load Balancing](/cluster#load-balancing): automatically performed once the number of nodes or load changes - [Offline Node Processing](/cluster#offline): any node that offline for more than a certain period will be removed from the cluster - [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain @@ -108,27 +107,14 @@ TDengine is a highly efficient platform to store, query, and analyze time-series - [Export 
Data](/administrator#export): export data either from TDengine shell or from the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculation, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files located
-- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter restrictions and reserved keywords
-
-## TDengine Technical Design
-
-- [System Module]: taosd functions and modules partitioning
-- [Data Replication]: support real-time synchronous/asynchronous replication, to ensure high-availability of the system
-- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles
-
-## Common Tools
-
-- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
-- [Use TDengine visually through IDEA Database Management Tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
+- [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine’s list of parameter limits and reserved keywords

## Performance: TDengine vs Others

-- [Performance: TDengine vs InfluxDB with InfluxDB’s open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
-- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
-- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
-- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
-- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
+- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/09/12/710.html)
+- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/09/12/708.html)
+- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/09/12/706.html)
+- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_en.pdf)

## More on IoT Big Data

- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don’t General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms?](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)
+- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): More technical analysis and architecture design articles

## FAQ

-- [FAQ: Common questions and answers](/faq)
+- [FAQ: Common questions and answers](/faq)
\ No newline at end of file
diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md
index 250f465d7b..58e30b2e1c 100644
--- a/documentation20/en/01.evaluation/docs.md
+++ b/documentation20/en/01.evaluation/docs.md
@@ -2,18 +2,18 @@

## About TDengine

-TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing
Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.
+TDengine is an innovative Big Data processing product launched by TAOS Data in the face of the fast-growing Internet of Things (IoT) Big Data market and technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in time-series space.

One of the modules of TDengine is the time-series database. However, in addition to this, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet BigData. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:

- **Performance improvement over 10 times**: An innovative data storage structure is defined, with each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general database.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resources consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
-- **Powerful analysis functions**: Data from ten years ago or one second ago, can all be queried based on a specified time range. Data can be aggregated on a timeline or multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and MATLAB.
-- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R, etc. without even one single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected to.
+- **Highly Available and Horizontally Scalable**: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support mission-critical applications.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables.
Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
+- **Core is Open Sourced:** Except for some auxiliary features, the core of TDengine is open sourced. Enterprises won't be locked in by the database anymore. The ecosystem is stronger, the product is more stable, and developer communities are more active.

-With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that due to making full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.
+With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, since it makes full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.

![TDengine Technology Ecosystem](page://images/eco_system.png)

@@ -62,4 +62,4 @@ From the perspective of data sources, designers can analyze the applicability of
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require system with high-reliability | | | √ | TDengine has a very robust and reliable system architecture to implement simple and convenient daily operation with streamlined experiences for operators, thus human errors and accidents are eliminated to the greatest extent. |
| Require controllable operation learning cost | | | √ | As above. |
-| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
+| Require abundant talent supply | √ | | | As a new-generation product, it’s still difficult to find talents with TDengine experiences from market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |
\ No newline at end of file
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 6941e44cf5..307ccadf9a 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -2,7 +2,7 @@

## Quick Install

-TDengine software consists of 3 components: server, client, and alarm module. At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).
+TDengine software consists of 3 parts: server, client, and alarm module.
At the moment, TDengine server only runs on Linux (Windows, mac OS and more OS supports will come soon), but client can run on either Windows or Linux. TDengine client can be installed and run on Windows or Linux. Applications based-on any OSes can all connect to server taosd via a RESTful interface. About CPU, TDengine supports X64/ARM64/MIPS64/Alpha64, and ARM32、RISC-V, other more CPU architectures will be supported soon. You can set up and install TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).

### Install from Source

Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo

### Install from Docker Container

-For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments.
-
-Please refer to the detailed operation in [Quickly experience TDengine through Docker](https://www.taosdata.com/en/documentation/getting-started/docker).
+Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).

### Install from Package

-It’s extremely easy to install for TDengine, which takes only a few seconds from downloaded to successful installed. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
+Three different packages for TDengine server are provided, please pick the one you like. (Lite packages only include executable files and the C/C++ connector, but standard packages support connectors of nearly all programming languages.) Beta version has more features, but we suggest you install the stable version for production or testing.

Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package.

@@ -131,7 +129,7 @@ After starting the TDengine server, you can execute the command `taosdemo` in the Linux shell.

```
$ taosdemo
```

-Using this command, a STable named `meters` will be created in the database `test` There are 10k tables under this stable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`. The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".
+Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. In each table there are 100k rows of records, each row with columns (`f1`, `f2` and `f3`). The timestamp is from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has tags `areaid` and `loc`: `areaid` is set from 1 to 10, `loc` is set to "beijing" or "shanghai".

It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will be inserted.
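To make the shape of the generated dataset concrete, the following is a hedged sketch of queries that could be run against it from the TDengine shell. The schema (database `test`, STable `meters`, columns `f1`/`f2`/`f3`, tags `areaid`/`loc`) follows the description above, while the specific queries are illustrative assumptions rather than part of taosdemo's output:

```sql
-- Count all generated rows across the 10k sub-tables (1 billion in total).
SELECT COUNT(*) FROM test.meters;

-- Aggregate the metric columns over all meters carrying a given tag value.
SELECT AVG(f1), MAX(f2), MIN(f3) FROM test.meters WHERE loc = 'beijing';

-- Downsample one sub-table into 10-second windows over the generated time range.
SELECT AVG(f1) FROM test.t0 INTERVAL(10s);
```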
@@ -201,7 +199,7 @@ Note: ● has been verified by official tests; ○ has been verified by unoffici

List of platforms supported by TDengine client and connectors

-At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.
+At the moment, TDengine connectors can support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and operating systems such as Linux/Win64/Win32.

Comparison matrix as following:

@@ -218,4 +216,4 @@ Comparison matrix as following:

Note: ● has been verified by official tests; ○ has been verified by unofficial tests.

-Please visit [Connectors](https://www.taosdata.com/en/documentation/connector) section for more detailed information.
+Please visit the Connectors section for more detailed information.
\ No newline at end of file
diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
index ac6c94fe40..b1d47684c3 100644
--- a/documentation20/en/03.architecture/docs.md
+++ b/documentation20/en/03.architecture/docs.md
@@ -4,7 +4,7 @@

### A Typical IoT Scenario

-In typical IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the collection devices of the same type, there are often many specific collection devices distributed in places. BigData processing system aims to collect all kinds of data, and then calculate and analyze them. For the same kind of devices, the data collected are very regular. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
+In typical industrial IoT, Internet of Vehicles and Operation Monitoring scenarios, there are often many different types of data collecting devices that collect one or more different physical metrics. However, for the data collection devices of the same type, there are often many specific collection devices distributed in places. A Big Data processing system aims to collect all kinds of data, and then store and analyze them. For the same kind of devices, the data collected are very structured. Taking smart meters as an example, assuming that each smart meter collects three metrics of current, voltage and phase, the collected data are similar to the following table:
@@ -108,48 +108,48 @@ Each data record contains the device ID, timestamp, collected metrics (current,

As the data points are a series of data points over time, the data points generated by IoT, Internet of Vehicles, and Operation Monitoring have some strong common characteristics:

-1. Metrics are always structured data;
-2. There are rarely delete/update operations on collected data;
-3. No need for transactions of traditional databases
-4. The ratio of reading is lower but write is higher than typical Internet applications;
-5. data flow is uniform and can be predicted according to the number of devices and collection frequency;
+1. metrics are always structured data;
+2. there are rarely delete/update operations on collected data;
+3. unlike traditional databases, transaction processing is not required;
+4. the ratio of writing over reading is much higher than in typical Internet applications;
+5. data volume is stable and can be predicted according to the number of devices and sampling rate;
 6. the user pays attention to the trend of data, not a specific value at a specific time;
 7. there is always a data retention policy;
 8. the data query is always executed in a given time range and a subset of space;
-9. in addition to storage and query operations, various statistical and real-time calculation operations are also required;
+9. in addition to storage and query operations, various statistical and real-time computing operations are also required;
 10. data volume is huge, a system may generate over 10 billion data points in a day.

-In light of the characteristics mentioned above, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
+By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.

### Relational Database Model

-Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a shallow learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL’s key-value storage.
+Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. SQL-like syntax is used, instead of NoSQL’s key-value storage.

-### One Table for One Collection Point
+### One Table for One Data Collection Point

-To utilize this time-series and other data features, TDengine requires the user to create a table for each collection point to store collected time-series data. For example, if there are over 10 millions smart meters, means 10 millions tables shall be created. For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:
+To utilize this time-series and other data features, TDengine requires the user to create a table for each data collection point to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables shall be created.
For the table above, 4 tables shall be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several advantages:

-1. Guarantee that all data from a collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
-2. Since the data generation process of each collection device is completely independent, means each device has its unique data source, thus writes can be carried out in a lock-free manner to greatly improve the speed.
+1. Guarantee that all data from a data collection point can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one data collection point in a time range, this design will reduce the random read latency significantly, thus increase read and query speed by orders of magnitude.
+2. Since the data generation process of each data collection device is completely independent and each data collection point has its unique data source, writes can be carried out in a lock-free manner to greatly improve the performance.
 3. Write latency can be significantly reduced too as the data points generated by the same device will arrive in time order, the new data point will be simply appended to a block.

-If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to continuously stored together. **The method of one table for each data collection point can ensure the optimal performance of insertion and query of a single data collection point to the greatest extent.**
+If the data of multiple devices are written into a table in the traditional way, due to the uncontrollable network delay, the timing of the data from different devices arriving at the server cannot be guaranteed, the writing operation must be protected by locks, and the data of one device cannot be guaranteed to be continuously stored together. **One table for each data collection point can ensure the optimal performance of insert and query of a single data collection point to the greatest extent.**

-TDengine suggests using collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
+TDengine suggests using data collection point ID as the table name (like D1001 in the above table). Each point may collect one or more metrics (like the current, voltage, phase as above). Each metric has a column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. All data will be stored in columns.
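As a hedged illustration of this modeling convention (the device ID and metric names follow the smart-meter example above; the exact column types are assumptions):

```sql
-- One table per data collection point: the table name is the device ID,
-- the first column is the mandatory timestamp index, the rest are the collected metrics.
CREATE TABLE d1001 (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT);

-- New points from d1001 arrive in time order and are simply appended to its own table.
INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31);
```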
### STable: A Collection of Data Points in the Same Type

-The method of one table for each point will bring a greatly increasing number of tables, which is difficult to manage. Moreover, applications often need to take aggregation operations between collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the STable (Super Table) concept is introduced by TDengine.
+The design of one table for each data collection point will require a huge number of tables, which is difficult to manage. Moreover, applications often need to perform aggregation operations across data collection points, thus aggregation operations will become complicated. To support aggregation over multiple tables efficiently, the [STable(Super Table)](https://www.taosdata.com/en/documentation/super-table) concept is introduced by TDengine.

-STable is an abstract collection for a type of data point. A STable contains a set of points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tag. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
+STable is an abstract set for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable (a set of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.

 In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user uses the definition of STable as a template and specifies the tag value of the specific collection point (table). Compared with the traditional relational database, the table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**

-When aggregating multiple data collection points with the same data type, TDEngine will first find out the tables that meet the tag filters from the STables, and then scan the time-series data of these tables to perform aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of aggregation calculation.
+When aggregating multiple data collection points with the same data type, TDengine will first find out the tables that meet the tag filter conditions from the STables, and then scan the time-series data of these tables to perform the aggregation operation, which can greatly reduce the data sets to be scanned, thus greatly improving the performance of data aggregation.
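A minimal sketch of this STable workflow, again using the smart-meter example (the tag names, tag types, and sample values are assumptions for illustration):

```sql
-- One STable per type of data collection point: the metric schema plus a tag schema.
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupid INT);

-- Each concrete device becomes a table created from the STable template,
-- carrying its own static tag values.
CREATE TABLE d1001 USING meters TAGS ('beijing', 2);

-- Aggregation first prunes the member tables by the tag filter,
-- then scans only the time-series data of the matching tables.
SELECT AVG(voltage) FROM meters WHERE groupid = 2;
```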
## Cluster and Primary Logic Unit -The design of TDengine is based on the assumption that one single hardware or software system is unreliable and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed according to a distributed and high-reliability architecture since Day One of R&D, which supports scale-out, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment. +The design of TDengine is based on the assumption that one single node or software system is unreliable and that no single node can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed in a distributed and high-reliability architecture since day one of the development, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment. ### Primary Logic Unit @@ -168,21 +168,21 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Virtual node (vnode)**: In order to better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage, and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacities of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs, and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is used to manage between mnodes, and the data synchronization is carried out in a strong consistent way. Any data update operation can only be done on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. 
There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. +**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strong consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnode via replication, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating DB, and the default is 1. Using the multi-replica feature of TDengine, the same high data reliability can be done without the need for expensive storage devices such as disk arrays. Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes has the same vnode group ID, means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `replica` when creating DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. 
Virtual node group is created and managed by management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.

**TAOSC**: TAOSC is the driver provided by TDengine to applications, which is responsible for dealing with the interaction between application and cluster, and provides the native interface of C/C++ language, which is embedded in JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through taosc instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, taosc also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C #/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, taosc has a running instance on each dnode of TDengine cluster.

### Node Communication

-**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transmission.
+**Communication mode**: The communication among each data node of TDengine system, and among application driver and each data node is carried out through TCP/UDP. Considering an IoT scenario, the data writing packets are generally not large, so TDengine uses UDP in addition to TCP for transmission, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission, confirmation and other mechanisms to ensure reliable transmission of UDP. For packets with a data volume of less than 15K, UDP is adopted for transmission, and TCP is automatically adopted for transmission of packets with a data volume of more than 15K or query operations. At the same time, TDengine will automatically compress/decompress the data, digital sign/authenticate the data according to the configuration and data packet. For data replication among data nodes, only TCP is used for data transportation. -**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address is variable, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the normal operation of DNS service, or configure hosts files on nodes and the nodes where applications are located. +**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system will automatically use the hostname of the computer as its FQDN. If the node is not configured with FQDN, you can directly set the configuration parameter fqdn of the node to its IP address. However, IP is not recommended because IP address may be changed, and once it changes, the cluster will not work properly. The EP (End Point) of a data node consists of FQDN + Port. With FQDN, it is necessary to ensure the DNS service is running, or hosts files on nodes are configured properly. -**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. When using, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. +**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort in TDengine, and the port for internal communication of cluster is serverPort+5. The data replication operation among data nodes in the cluster also occupies a TCP port, which is serverPort+10. In order to support multithreading and efficient processing of UDP data, each internal and external UDP connection needs to occupy 5 consecutive ports. 
Therefore, the total port range of a data node will be serverPort to serverPort + 10, for a total of 11 TCP/UDP ports. To run the system, make sure that the firewall keeps these ports open. Each data node can be configured with a different serverPort. -**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option-h, and the configured port number can be specified through -p. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted. +**Cluster external connection**: TDengine cluster can accommodate one single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter serverPort of TDengine will be adopted. **Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode: 1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step; 2: Check the system configuration file taos.cfg to obtain node configuration parameters firstEp and secondEp (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step; 3: Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connected. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again. @@ -192,7 +192,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Redirection**: No matter about dnode or taosc, the connection to the mnode shall be initiated first, but the mnode is automatically created and maintained by the system, so user does not know which dnode is running the mnode. TDengine only requires a connection to any working dnode in the system. Because any running dnode maintains the currently running mnode EP List, when receiving a connecting request from the newly started dnode or taosc, if it’s not an mnode by self, it will reply the mnode EP List back. After receiving this list, taosc or the newly started dnode will try to establish the connection again. 
When the mnode EP List changes, each data node quickly obtains the latest list and notifies taosc through messaging interaction among nodes.

-### A Typical Messaging Process

+### A Typical Data Writing Process

 To explain the relationship between vnode, mnode, taosc and application and their respective roles, the following is an analysis of a typical data writing process.

@@ -200,62 +200,62 @@ To explain the relationship between vnode, mnode, taosc and application and thei
Picture 2 Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
-2. Cache be checked by taosc that if meta data existing for the table. If so, go straight to Step 4. If not, taosc sends a get meta-data request to mnode.
+2. taosc checks its cache to see whether meta data exists for the table. If so, go straight to Step 4. If not, taosc sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to taosc. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If taosc does not receive a response from the mnode for a long time, and there are multiple mnodes, taosc will send a request to the next mnode.
4. Taosc initiates an insert request to master vnode.
-5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will judge the node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in vgroup.
+5. After vnode inserts the data, it gives a reply to taosc, indicating that the insertion is successful. If taosc doesn't get a response from vnode for a long time, taosc will treat this node as offline. In this case, if there are multiple replicas of the inserted database, taosc will issue an insert request to the next vnode in vgroup.
6. Taosc notifies APP that writing is successful.
-For Step 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the externally serving End Point of the configured cluster. If the dnode that received the request does not have an mnode configured, it will inform the mnode EP list in a reply message, so that taosc will re-issue a request to obtain meta-data to the EP of another new mnode.
+For Steps 2 and 3, when taosc starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have an mnode configured, it will reply with the mnode EP list, so that taosc can re-issue the meta-data request to the EP of a new mnode.
-For Step 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so assumes that the first vnodeID is the master and send a request to it. If the requested vnode is not the master, it will reply the actual master as a new target taosc makes a request to. Once the reply of successful insertion is obtained, taosc will cache the information of master node.
+For Steps 4 and 5, without caching, taosc can't recognize the master in the virtual node group, so it assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply with the actual master as the new target to which taosc shall send the request. Once the reply of successful insertion is obtained, taosc will cache the information of the master node.
-The above is the process of inserting data, and the processes of querying and calculating are completely consistent. Taosc encapsulates and shields all these complicated processes, and has no perception and no special treatment for applications.
+The above is the process of inserting data, and the processes of querying and computing are the same.
Taosc encapsulates and hides all these complicated processes, and it is transparent to applications.
-Through taosc caching mechanism, mnode needs to be accessed only when a table is operated for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), taosc will interact with mnode regularly to automatically update the cache.
+Through the taosc caching mechanism, mnode needs to be accessed only when a table is accessed for the first time, so mnode will not become a system bottleneck. However, because schema and vgroup may change (such as load balancing), taosc will interact with mnode regularly to automatically update the cache.
## Storage Model and Data Partitioning/Sharding
### Storage Model
-The data stored by TDengine include collected time-series data, metadata related to libraries and tables, tag data, etc. These data are specifically divided into three parts:
+The data stored by TDengine include collected time-series data, metadata related to databases and tables, tag data, etc. These data are specifically divided into three parts:
-- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when update parameter is set to 1. By adopting the model with one table for each collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple add operation. Multiple records can be read at one time, thus ensuring the insert and query operation of a single collection point with best performance.
-- Tag data: meta files stored in vnode support four standard operations of add, delete, modify and check. The amount of data is not large. If there are N tables, there are N records, so all can be stored in memory. If there are many tag filtering operations, queries will be very frequent and TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in face of millions of tables, the filtering results will return in milliseconds.
-- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of add, delete, modify and query are supported. The amount of these data are not large and can be stored in memory, moreover the query amount is not large because of the client cache. Therefore, TDengine uses centralized storage management, however, there will be no performance bottleneck.
+- Time-series data: stored in vnode and composed of data, head and last files. The amount of data is large and the query amount depends on the application scenario. Out-of-order writing is allowed, but delete operation is not supported for the time being, and update operation is only allowed when the database update parameter is set to 1. By adopting the model with one table for each data collection point, the data of a given time period is continuously stored, and the writing against one single table is a simple appending operation. Multiple records can be read at one time, thus ensuring the best insert and query performance for a single data collection point.
+- Tag data: meta files stored in vnode. Four standard operations of create, read, update and delete are supported. The amount of data is not large.
If there are N tables, there are N records, so all can be stored in memory. To make tag filtering efficient, TDengine supports multi-core and multi-threaded concurrent queries. As long as the computing resources are sufficient, even in the face of millions of tables, the tag filtering results will return in milliseconds.
+- Metadata: stored in mnode, including system node, user, DB, Table Schema and other information. Four standard operations of create, delete, update and read are supported. The amount of these data is not large and can be stored in memory; moreover, the query amount is not large because of the client cache. Therefore, although TDengine uses centralized storage management, there will be no performance bottleneck.
Compared with the typical NoSQL storage model, TDengine stores tag data and time-series data completely separately, which has two major advantages:
-- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicates, so wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite again, which is extremely expensive to operate.
-- Realize extremely efficient aggregation query between multiple tables: when doing aggregation query between multiple tables, it firstly finds out the tag filtered tables, and then find out the corresponding data blocks of these tables to greatly reduce the data sets to be scanned, thus greatly improving the query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
+- Greatly reduce the redundancy of tag data storage: general NoSQL database or time-series database adopts K-V storage, in which Key includes timestamp, device ID and various tags. Each record carries these duplicated tags, so storage space is wasted. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse the data and rewrite them again, which is an extremely expensive operation.
+- Aggregate data efficiently between multiple tables: when aggregating data between multiple tables, it first finds the tables that satisfy the filtering conditions, and then finds the corresponding data blocks of these tables, greatly reducing the data sets to be scanned and thus greatly improving the aggregation efficiency. Moreover, tag data is managed and maintained in a full-memory structure, and tag data queries in tens of millions can return in milliseconds.
### Data Sharding
-For large-scale data management, to achieve scale-out, it is generally necessary to adopt the a Partitioning strategy as Sharding. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
+For large-scale data management, to achieve scale-out, it is generally necessary to adopt the Partitioning or Sharding strategy. TDengine implements data sharding via vnode, and time-series data partitioning via one data file for each time range.
-VNode (Virtual Data Node) is responsible for providing writing, query and calculation functions for collected time-series data. To facilitate load balancing, data recovery and support heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources.
The management of these vnodes is done automatically by TDengine and completely transparent to the application.
+VNode (Virtual Data Node) is responsible for providing writing, query and computing functions for collected time-series data. To facilitate load balancing, data recovery and the support of heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to the application.
For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to process it (if a 16-byte record is generated per second, the original data generated in one year will be less than 0.5 G), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing the data to two or more dnodes. Moreover, a vnode can store data from multiple data collection points (tables), and the upper limit of the tables’ quantity for a vnode is one million. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of system cores.
When creating a DB, the system does not allocate resources immediately. However, when creating a table, the system will check if there is an allocated vnode with free tablespace. If so, the table will be created in the vacant vnode immediately. If not, the system will create a new vnode on a dnode from the cluster according to the current workload, and then create the table on it. If there are multiple replicas of a DB, the system does not create only one vnode, but a vgroup (virtual data node group). The system has no limit on the number of vnodes, which is just limited by the computing and storage resources of physical nodes.
-The meda data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is convenient for efficient and parallel tag filtering operations.
+The meta data of each table (including schema, tags, etc.) is also stored in vnode instead of centralized storage in mnode. In fact, this means sharding of meta data, which is good for efficient and parallel tag filtering operations.
### Data Partitioning
-In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter “days”. This method of partitioning by time rang is also convenient to efficiently implement the data retention strategy. As long as the data file exceeds the specified number of days (system configuration parameter ‘keep’), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the cold/hot management of big data and realize tiered-storage.
+In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter "`days`". This method of partitioning by time range is also convenient to efficiently implement the data retention policy.
As long as a data file exceeds the specified number of days (system configuration parameter "`keep`"), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered storage. Cold/hot data can be stored on different storage media to reduce the storage cost.
-In general, **TDengine splits big data by vnode and time as two dimensions**, which is convenient for parallel and efficient management with scale-out.
+In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.
### Load Balancing
-Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node) for declaring the status of the entire cluster. Based on the overall state, when an mnode finds an overloaded dnode, it will migrate one or more vnodes to other dnodes. In the process, external services keep running and the data insertion, query and calculation operations are not affected.
+Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes. During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.
-If the mnode has not received the dnode status for a period of time, the dnode will be judged as offline. When offline lasts a certain period of time (the duration is determined by the configuration parameter ‘offlineThreshold’), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure t the replica number.
+If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter ‘`offlineThreshold`’), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
-When new data nodes are added to the cluster, with new computing and storage are added, the system will automatically start the load balancing process.
+When new data nodes are added to the cluster, bringing new computing and storage resources, the system will automatically start the load balancing process.
-The load balancing process does not require any manual intervention without application restarted. It will automatically connect new nodes with completely transparence.
**Note: load balancing is controlled by parameter “balance”, which determines to turn on/off automatic load balancing.**
+The load balancing process does not require any manual intervention, and it is transparent to the application. **Note: load balancing is controlled by the parameter “balance”, which turns automatic load balancing on or off.**
## Data Writing and Replication Process
@@ -267,8 +267,8 @@ Master Vnode uses a writing process as follows:
Figure 3: TDengine Master writing process
-1. Master vnode receives the application data insertion request, verifies, and to next step;
-2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
+1. Master vnode receives the application data insertion request, verifies it, and moves to the next step;
+2. If the system configuration parameter “`walLevel`” is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful writing.
@@ -285,27 +285,27 @@ For a slave vnode, the write process as follows:
2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
3. Write into memory and add the record to “skip list”;
-Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory is exactly the same as WAL.
+Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, meaning two fewer steps. But writing into memory and WAL is exactly the same.
### Remote Disaster Recovery and IDC Migration
-As above Master and Slave processes discussed, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with not obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
+As discussed in the Master and Slave processes above, TDengine adopts asynchronous replication for data synchronization. This method can greatly improve the writing performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDCs and different racks, thus implementing remote disaster recovery without other tools.
-On the other hand, TDengine supports dynamic modification of the replicas number.
Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization completed, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can realize IDC room migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
+On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is completed, the added virtual nodes can provide services. During the synchronization process, the master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
-However, this asynchronous replication method has a tiny time window of written data lost. The specific scenario is as follows:
+However, the asynchronous replication has a tiny time window where data can be lost. The specific scenario is as follows:
-1. Master vnode has completed its 5-step operations, confirmed the success of writing to APP, and then went down;
+1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then went down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record
-In theory, as long as in asynchronous replication, there is no guarantee for no losing. However, this window is extremely small, only if mater and slave fail at the same time, and just confirm the successful write to the application before.
+In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this window is extremely small: it occurs only when the master and slave fail at the same time, right after the master has confirmed a successful write to the application.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
-Vnode maintains a Version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is collecting time-series data or metadata, this version number will be increased by one.
+Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process.
The rules are as follows:
@@ -318,7 +318,7 @@ See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/do
### Synchronous Replication
-For scenarios with higher data consistency requirements, asynchronous data replication is not applicable, because there is some small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
+For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replicas, it needs to wait for “quorum-1” reply confirmations before informing the application that data has been successfully written to the slave. If “quorum-1” reply confirmations are not received within a certain period of time, the master vnode will return an error to the application.
With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs to be strongly consistent, the default for data synchronization between mnodes is synchronous replication.
@@ -336,17 +336,17 @@ Each vnode has its own independent memory, and it is composed of multiple memory
TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will launch the disk-writing thread to write the cached data into persistent storage so as not to block subsequent data writing. TDengine will open a new database log file when writing the data to disk, and delete the old database log file after the data is written successfully to avoid unlimited log growth.
-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter “days”. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.
+To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter “`days`”. In this way, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.
-For collected data, there is generally a retention period, which is determined by the system configuration parameter “keep”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
+For time-series data, there is generally a retention policy, which is determined by the system configuration parameter “`keep`”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
Given the “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small; 10 to 100 is appropriate. Based on this principle, a reasonable value for “days” can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set.
-In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of file block is determined by the system parameter “maxRows” (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
+In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter “`maxRows`” (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating the data in a search will take longer; if too small, the index of the data block will be too large, and the compression efficiency will be low, with slower reading speed.
-Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter “minRows” (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to help the system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter “`minRows`” (minimum number of records per block), they will be stored in the last file first. At the next disk write, the newly written records will be merged with the records in the last file and then written into the data file.
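To see how these file-related parameters fit together, here is a hedged sketch; the option names follow the CREATE DATABASE examples elsewhere in this documentation, and the values are hypothetical, chosen only for illustration:

```mysql
-- Hypothetical settings: keep/days = 365/10, i.e. roughly 36 data files
-- per vnode, which falls in the recommended range of 10 to 100.
-- maxRows/minRows bound the number of records stored in one file block.
CREATE DATABASE power KEEP 365 DAYS 10 MAXROWS 4096 MINROWS 100;
```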
-When data is written to disk, it is decided whether to compress the data according to system configuration parameter “comp”. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
+When data is written to disk, whether to compress it is decided according to the system configuration parameter “`comp`”. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression, with the result compressed again by a general compression algorithm, which gives a higher compression ratio.
### Tiered Storage
@@ -393,17 +393,15 @@ When client obtains query result, the worker thread in query execution queue of
### Aggregation by Time Axis, Downsampling, Interpolation
-The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and unique function from common databases. From this point of view, it is similar to the window query of stream computing engine.
+The remarkable feature that distinguishes time-series data from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important feature that sets it apart from common databases. From this point of view, it is similar to the window query of a stream computing engine.
-The keyword “interval” is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated according to time windows, and the data within window range are aggregated as needed. For example:
+The keyword “`interval`” is introduced into TDengine to split the time axis into fixed-length time windows, and the data within each window are aggregated as needed. For example:
```mysql
select count(*) from d1001 interval(1h);
```
-According to the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window.
-
-
+For the data collected by device D1001, the number of records stored per hour is returned by a 1-hour time window.
In application scenarios where query results need to be obtained continuously, if there is data missing in a given time interval, the data results in this interval will also be lost. TDengine provides a strategy to interpolate the results of timeline aggregation calculation. The results of time axis aggregation can be interpolated by using the keyword fill. For example:
```mysql
select count(*) from d1001 interval(1h) fill(prev);
```
-According to the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned.
TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value).
+For the data collected by device D1001, the number of records per hour is counted. If there is no data in a certain hour, statistical data of the previous hour is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL value populating (NULL), and specific value populating (value).
### Multi-table Aggregation Query
-TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is completely consistent, but each table has its own static tag. The tags can be multiple and be added, deleted and modified at any time. Applications can aggregate or statistically operate all or a subset of tables under a STABLE by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:
+TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable. STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STable by specifying tag filters, thus greatly simplifying the development of applications. The process is shown in the following figure:
![Diagram of multi-table aggregation query](page://images/architecture/multi_tables.png)
Picture 4 Diagram of multi-table aggregation query
@@ -427,8 +425,9 @@ TDengine creates a separate table for each data collection point, but in practic
5. Each vnode first finds out the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns the result to taosc;
6. taosc finally aggregates the results returned by multiple data nodes and sends them back to the application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which greatly reduces the volume of data scanned and improves aggregation calculation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation calculation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in the aggregation operation is first found, which greatly reduces the volume of data scanned and improves the aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
### Precomputation
In order to effectively improve the performance of query processing, based on the immutable nature of IoT data, statistical information of the data stored in a data block is recorded in the head of the data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are used directly, and there is no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of the data block stored on disk, for query processing with disk IO as the bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) index of PostgreSQL.
+
diff --git a/documentation20/en/04.model/docs.md b/documentation20/en/04.model/docs.md
index 08d952d317..e28dd906f3 100644
--- a/documentation20/en/04.model/docs.md
+++ b/documentation20/en/04.model/docs.md
@@ -2,17 +2,15 @@ TDengine adopts a relational data model, so we need to build the "database" and
"table". Therefore, for a specific application scenario, it is necessary to consider the design of the database, STable and ordinary table. This section does not discuss detailed syntax rules, but only concepts.
-Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.html) for data modeling.
## Create a Database
-Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on.
To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. For example:
+Different types of data collection points often have different data characteristics, including data sampling rate, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine works with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time resolution, max and min number of records in a file block, whether it is compressed or not, and number of days covered by a data file. For example:
```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
```
-The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 4 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
+The above statement will create a database named “power”. The data of this database will be kept for 365 days (data will be automatically deleted 365 days later), one data file will be created per 10 days, the number of memory blocks is 6, and data updating is allowed. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).
After the database is created, please use the SQL command USE to switch to the new database, for example:
@@ -20,13 +18,12 @@ After the database created, please use SQL command USE to switch to the new data
USE power;
```
-Replace the database operating in the current connection with “power”, otherwise, before operating on a specific table, you need to use "database name. table name" to specify the name of database to use.
+This specifies “power” as the database in use for the current connection; otherwise, before operating on a specific table, you need to use "database-name.table-name" to specify the database name.
**Note:**
- Any table or STable belongs to a database. Before creating a table, a database must be created first.
- Tables in two different databases cannot be JOIN.
-- You need to specify a timestamp when creating and inserting records, or querying history records.
## Create a STable
@@ -38,11 +35,11 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
**Note:** The STABLE keyword in this instruction needs to be written as TABLE in versions before 2.0.15.
-Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, volume, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tag (location, groupId in the example), and the data types of the tag can be int, float, string and so on. Static attributes of collection points can often be used as tags, such as geographic location of collection points, device model, device group ID, administrator ID, etc. The schema of the tag can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
+Just like creating an ordinary table, you need to provide the table name (‘meters’ in the example) and the table structure Schema, that is, the definition of data columns. The first column must be a timestamp (‘ts’ in the example), the other columns are the physical metrics collected (current, voltage, phase in the example), and the data types can be int, float, string, etc. In addition, you need to provide the schema of the tags (location, groupId in the example), and the data types of the tags can be int, float, string and so on. Static attributes of data collection points can often be used as tags, such as geographic location of collection points, device model, device group ID, administrator ID, etc. The schema of the tags can be added, deleted and modified afterwards. Please refer to the [STable Management section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#super-table) for specific definitions and details.
-Each type of data collection point needs an established STable, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, transformers, buses, switches, etc. For IoT, a device may have multiple data collection points (for example, a fan for wind-driven generator, some collection points capture parameters such as current and voltage, and some capture environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for corresponding types of devices. All collected physical metrics contained in one and the same STable must be collected at the same time (with a consistent timestamp).
+A STable must be created for each type of data collection point, so an IoT system often has multiple STables. For the power grid, we need to build a STable for smart meters, a STable for transformers, a STable for buses, a STable for switches, etc. For IoT, a device may have multiple data collection points (for example, a fan of a wind-driven generator, where one data collection point captures metrics such as current and voltage, and another captures environmental parameters such as temperature, humidity and wind direction). In this case, multiple STables need to be established for corresponding types of devices. All metrics contained in a STable must be collected at the same time (with the same timestamp).
-A STable allows up to 1024 columns. If the number of physical metrics collected at a collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
+A STable allows up to 1024 columns. If the number of metrics collected at a data collection point exceeds 1024, multiple STables need to be built to process them. A system can have multiple DBs, and a DB can have one or more STables.
## Create a Table
@@ -54,22 +51,23 @@ CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
Where d1001 is the table name, meters is the name of the STable, followed by the specific tag value "Beijing.Chaoyang" for tag location and the specific tag value 2 for tag groupId. Although the tag values need to be specified when creating the table, they can be modified afterwards. Please refer to the [Table Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#table) for details.
-**Note: ** At present, TDengine does not technically restrict the use of a STable of a database (dbA) as a template to create a sub-table of another database (dbB). This usage will be prohibited later, and it is not recommended to use this method to create a table.
+**Note:** At present, TDengine does not technically restrict the use of a STable of a database (dbA) as a template to create a sub-table of another database (dbB). This usage will be prohibited later, and it is not recommended to use this way to create a table.
TDengine suggests using the globally unique ID of a data collection point as the table name (such as the device serial number). However, in some scenarios, there is no unique ID, and multiple IDs can be combined into a unique ID. It is not recommended to use a unique ID as tag value.
-**Automatic table creating** : In some special scenarios, user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using automatic table building syntax when writing data. If the table already exists, no new table will be created. For example:
+**Automatic table creating**: In some special scenarios, the user is not sure whether the table of a certain data collection point exists when writing data. In this case, the non-existent table can be created by using the automatic table creating syntax when writing data. If the table already exists, no new table will be created. For example:
```mysql
INSERT INTO d1001 USING METERS TAGS ("Beijing.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
```
-The SQL statement above inserts records (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to automatically create it, and the tag value "Beijing.Chaoyang", 2 is marked at the same time.
+The SQL statement above inserts a record (now, 10.2, 219, 0.32) into table d1001. If table d1001 has not been created yet, the STable meters is used as the template to create it automatically, and the tag value "Beijing.Chaoyang", 2 is set at the same time.
For detailed syntax of automatic table building, please refer to the "[Automatic Table Creation When Inserting Records](https://www.taosdata.com/en/documentation/taos-sql#auto_create_table)" section.
## Multi-column Model vs Single-column Model
-TDengine supports multi-column model. As long as physical metrics are collected simultaneously by a data collection point (with a consistent timestamp), these metrics can be placed in a STable as different columns. However, there is also an extreme design, a single-column model, in which each collected physical metric is set up separately, so each type of physical metrics is set up separately with a STable.
For example, create 3 Stables, one each for current, voltage and phase.
+TDengine supports the multi-column model. As long as metrics are collected simultaneously by a data collection point (with the same timestamp), these metrics can be placed in a STable as different columns. However, there is also an extreme design, the single-column model, in which a STable is created for each metric. For the smart meter example, we need to create 3 STables, one for current, one for voltage and one for phase.
+TDengine recommends using the multi-column model as much as possible because of its higher insertion and storage efficiency. However, for some scenarios, the types of collected metrics often change. In this case, if the multi-column model is adopted, the schema definition of the STable needs to be modified frequently and the application becomes complicated. To avoid that, the single-column model is recommended.
-TDengine recommends using multi-column model as much as possible because of higher insertion and storage efficiency. However, for some scenarios, types of collected metrics often change. In this case, if multi-column model is adopted, the structure definition of STable needs to be frequently modified so make the application complicated. To avoid that, single-column model is recommended.
diff --git a/documentation20/en/05.insert/docs.md b/documentation20/en/05.insert/docs.md
index 88746ea608..b646403b56 100644
--- a/documentation20/en/05.insert/docs.md
+++ b/documentation20/en/05.insert/docs.md
@@ -1,8 +1,8 @@
# Efficient Data Writing
-TDengine supports multiple interfaces to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in a single piece or in batches, data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, nonsequential data insertion, and also historical data insertion.
+TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV file, etc. Kafka, OPC and other interfaces will be provided in the future. Data can be inserted in one single record or in batches, and data from one or multiple data collection points can be inserted at the same time. TDengine supports multi-thread insertion, out-of-order data insertion, and also historical data insertion.
-## SQL Writing
+## Data Writing via SQL
Applications insert data by executing SQL insert statements through C/C++, JDBC, GO, or Python Connector, and users can manually enter SQL insert statements to insert data through TAOS Shell. For example, the following insert writes a record to table d1001:
@@ -10,13 +10,13 @@ Applications insert data by executing SQL insert statements through C/C++, JDBC,
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
-TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:
+TDengine supports writing multiple records in a single statement. For example, the following command writes two records to table d1001:
```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```
-TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:
+TDengine also supports writing data to multiple tables in a single statement.
For example, the following command writes two records to d1001 and one record to d1002:
```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
@@ -26,22 +26,22 @@ For the SQL INSERT Grammar, please refer to [Taos SQL insert](https://www.taosd
**Tips:**
-- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M).
-- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write parallelly. However, after the number of threads reaches a certain threshold, it cannot be increased or even become decreased, because too much frequent thread switching brings extra overhead.
-- For a same table, if the timestamp of a newly inserted record already exists, (no database was created using UPDATE 1) the new record will be discarded as default, that is, the timestamp must be unique in a table. If an application automatically generates records, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted will be smaller than the number of records the application try to insert. If you use UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record.
+- To improve writing efficiency, batch writing is required. The more records written in a batch, the higher the insertion efficiency. However, a record size cannot exceed 16K, and the total length of an SQL statement cannot exceed 64K (it can be configured by parameter maxSQLLength, and the maximum can be configured to 1M).
+- TDengine supports multi-thread parallel writing. To further improve writing speed, a client needs to open more than 20 threads to write in parallel. However, after the number of threads reaches a certain threshold, increasing it brings no further gain and may even hurt performance, because too much thread switching brings extra overhead.
+- For the same table, if the timestamp of a newly inserted record already exists, the new record will be discarded by default (database option update = 0), that is, the timestamp must be unique in a table. If an application automatically generates records, it is very likely that the generated timestamps will be the same, so the number of records successfully inserted will be smaller than the number of records the application tries to insert. If you use the UPDATE 1 option when creating a database, inserting a new record with the same timestamp will overwrite the original record.
- The timestamp of written data must be greater than the current time minus the time of configuration parameter keep. If keep is configured for 3650 days, data older than 3650 days cannot be written. The timestamp for writing data cannot be greater than the current time plus configuration parameter days. If days is configured to 2, data 2 days later than the current time cannot be written.
-## Direct Writing of Prometheus
+## Data Writing via Prometheus
As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used in the field of performance monitoring and K8S performance monitoring.
TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Prometheus and no coding, can directly write the data collected by Prometheus into TDengine, then automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine. ### Compile blm_prometheus From Source -Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from github, then compile and generate an executable file using Golang language compiler. Before you start compiling, you need to complete following prepares: +Users need to download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it and generate an executable file using the Golang compiler. Before you start compiling, you need to prepare: - A server running Linux OS - Golang version 1.10 or higher installed -An appropriated TDengine version. Because the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side; for example, if the server version is TDengine 2.0. 0, ensure install the same version on the linux server where bailongma is located (can be on the same server as TDengine, or on a different server) +- Since the client dynamic link library of TDengine is used, it is necessary to install the same version of TDengine as the server-side. For example, if the server version is TDengine 2.0.0, make sure to install the same version on the Linux server where Bailongma is located (it can be on the same server as TDengine, or on a different server) The Bailongma project has a folder, blm_prometheus, which holds the Prometheus writing API. The compiling process is as follows: @@ -134,7 +134,7 @@ The format of generated data by Prometheus is as follows: } ``` -Where apiserver_request_latencies_bucket is the name of the time-series data collected by prometheus, and the tag of the time-series data is in the following {}. blm_prometheus automatically creates a STable in TDengine with the name of the time series data, and converts the tag in {} into the tag value of TDengine, with Timestamp as the timestamp and value as the value of the time-series data. Therefore, in the client of TDEngine, you can check whether this data was successfully written through the following instruction. +Where apiserver_request_latencies_bucket is the name of the time-series data collected by Prometheus, and the tags of the time-series data are in the following {}. blm_prometheus automatically creates a STable in TDengine with the name of the time series data, and converts the tags in {} into the tag values of TDengine, with Timestamp as the timestamp and value as the value of the time-series data. Therefore, in the client of TDengine, you can check whether this data was successfully written through the following instruction. ```mysql use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Direct Writing of Telegraf +## Data Writing via Telegraf [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for IT operation data collection.
TDengine provides a simple tool [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Telegraf and no coding, can directly write the data collected by Telegraf into TDengine, then automatically create databases and related table entries in TDengine according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine. @@ -271,12 +271,12 @@ select * from cpu; MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by MQTT Broker and write it to TDengine. -## Direct Writing of EMQ Broker +## Data Writing via EMQ Broker [EMQ](https://github.com/emqx/emqx) is an open source MQTT Broker software. With no coding needed, you only have to configure "rules" in the EMQ Dashboard, and MQTT data can be directly written into TDengine. EMQ X supports storing data to TDengine by sending it to a Web service, and also provides a native TDengine driver on Enterprise Edition for direct data storage. Please refer to [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for more details. -## Direct Writing of HiveMQ Broker +## Data Writing via HiveMQ Broker -[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. +[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transmission, meeting requirements for scalability, easy management and security. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details. \ No newline at end of file diff --git a/documentation20/en/06.queries/docs.md index c4f1359820..7688a941f0 100644 --- a/documentation20/en/06.queries/docs.md +++ b/documentation20/en/06.queries/docs.md @@ -28,7 +28,7 @@ For specific query syntax, please see the [Data Query section of TAOS SQL](https ## Multi-table Aggregation Query -In an IoT scenario, there are often multiple data collection points in a same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the statical attributes of data collection points. A given data collection point has a specific tag value. By specifying the filters of tags, TDengine provides an efficient method to aggregate and query the sub-tables of STables (data collection points of a certain type).
Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same. +In an IoT scenario, there are often multiple data collection points of the same type. TDengine uses the concept of STable to describe a certain type of data collection point, and an ordinary table to describe a specific data collection point. At the same time, TDengine uses tags to describe the static attributes of data collection points. A given data collection point has a specific tag value. By specifying the filters of tags, TDengine provides an efficient method to aggregate and query the sub-tables of STables (data collection points of a certain type). Aggregation functions and most operations on ordinary tables are applicable to STables, and the syntax is exactly the same. **Example 1**: In TAOS Shell, look up the average voltages collected by all smart meters in Beijing and group them by location @@ -55,7 +55,7 @@ TDengine only allows aggregation queries between tables belonging to a same STab ## Down Sampling Query, Interpolation -In a scenario of IoT, it is often necessary to aggregate the collected data by intervals through down sampling. TDengine provides a simple keyword interval, which makes query operations according to time windows extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds. +In IoT scenarios, it is often necessary to aggregate the collected data by intervals through down sampling. TDengine provides a simple keyword `interval`, which makes query operations according to time windows extremely simple. For example, the current values collected by smart meter d1001 are summed every 10 seconds. ```mysql taos> SELECT sum(current) FROM d1001 INTERVAL(10s); @@ -94,6 +94,6 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a); Query OK, 5 row(s) in set (0.001521s) ``` -In a scenario of IoT, it is difficult to synchronize the time stamp of collected data at each point, but many analysis algorithms (such as FFT) need to align the collected data strictly at equal intervals of time. In many systems, it’s required to write their own programs to process, but the down sampling operation of TDengine can be easily solved. If there is no collected data in an interval, TDengine also provides interpolation calculation function. +In IoT scenarios, it is difficult to synchronize the timestamps of the data collected at each point, but many analysis algorithms (such as FFT) need to align the collected data strictly at equal intervals of time. In many systems, users have to write their own programs to handle this, but the down sampling operation of TDengine can be used to solve the problem easily. If there is no collected data in an interval, TDengine also provides an interpolation function. For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).
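As an illustrative sketch (using the smart meter table d1001 from the examples above; the time range here is hypothetical, and the full set of FILL options is described in the linked section), a down sampling query that interpolates empty time windows might look like:

```mysql
-- Sum current every 10 seconds over a fixed time range; windows that
-- contain no data are filled with the value of the previous window.
SELECT SUM(current) FROM d1001
  WHERE ts >= '2018-10-03 14:38:00' AND ts <= '2018-10-03 14:38:50'
  INTERVAL(10s) FILL(PREV);
```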
\ No newline at end of file diff --git a/documentation20/en/07.advanced-features/docs.md index d9103c7021..38c70862b6 100644 --- a/documentation20/en/07.advanced-features/docs.md +++ b/documentation20/en/07.advanced-features/docs.md @@ -9,8 +9,8 @@ Continuous query of TDengine adopts time-driven mode, which can be defined direc The continuous query provided by TDengine differs from the time window calculation in ordinary stream computing in the following ways: - Unlike the real-time feedback calculated results of stream computing, continuous query only starts calculation after the time window is closed. For example, if the time period is 1 day, the results of that day will only be generated after 23:59:59. -- If a history record is written to the time interval that has been calculated, the continuous query will not recalculate and will not push the results to the user again. For the mode of writing back to TDengine, the existing calculated results will not be updated. -- Using the mode of continuous query pushing results, the server does not cache the client's calculation status, nor does it provide Exactly-Once semantic guarantee. If the user's application side crashed, the continuous query pulled up again would only recalculate the latest complete time window from the time pulled up again. If writeback mode is used, TDengine can ensure the validity and continuity of data writeback. +- If a history record is written to a time interval that has already been calculated, the continuous query will not re-calculate and will not push the new results to the user again. +- The TDengine server does not cache or save the client's status, nor does it provide an Exactly-Once semantic guarantee. If the application crashes, the continuous query will be pulled up again, and the starting time must be provided by the application. ### How to use continuous query @@ -29,7 +29,7 @@ We already know that the average voltage of these meters can be counted with one select avg(voltage) from meters interval(1m) sliding(30s); ``` -Every time this statement is executed, all data will be recalculated. If you need to execute every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as following, using a different `startTime` each time and executing it regularly: +Every time this statement is executed, all data will be re-calculated. If you need to execute it every 30 seconds to incrementally calculate the data of the latest minute, you can improve the above statement as follows, using a different `startTime` each time and executing it regularly: ```sql select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); ``` @@ -65,7 +65,7 @@ It should be noted that now in the above example refers to the time when continu ### Manage the Continuous Query -Users can view all continuous queries running in the system through the show streams command in the console, and can kill the corresponding continuous queries through the kill stream command. Subsequent versions will provide more finer-grained and convenient continuous query management commands. +Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill the corresponding continuous queries through the `kill stream` command. Subsequent versions will provide finer-grained and more convenient continuous query management commands. ## Publisher/Subscriber @@ -101,7 +101,7 @@ Another method is to query the STable.
In this way, no matter how many meters th select * from meters where ts > {last_timestamp} and current > 10; ``` -However, how to choose `last_timestamp` has become a new problem. Because, on the one hand, the time of data generation (the data timestamp) and the time of data storage are generally not the same, and sometimes the deviation is still very large; On the other hand, the time when the data of different meters arrive at TDengine will also vary. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; If the timestamp of the fastest meter is used, the data of other meters may be missed. +However, how to choose `last_timestamp` has become a new problem. On the one hand, the time of data generation (the data timestamp) and the time of data writing are generally not the same, and sometimes the deviation is quite large; on the other hand, the time when the data of different meters arrives at TDengine will also vary. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; if the timestamp of the fastest meter is used, the data of other meters may be missed. The subscription function of TDengine provides a thorough solution to the above problem. @@ -357,4 +357,4 @@ This SQL statement will obtain the last recorded voltage value of all smart mete In TDengine application scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter out data that meet certain conditions from the data of the latest period of time, and calculate a result according to a defined formula based on these data. When the result meets certain conditions and lasts for a certain period of time, it will notify the user in some form. -In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html). +In order to meet the needs of users for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html). \ No newline at end of file diff --git a/documentation20/en/08.connector/docs.md index e8087b79db..9cbd395206 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -179,7 +179,7 @@ Clean up the running environment and call this API before the application exits. - `int taos_options(TSDB_OPTION option, const void * arg, ...)` -Set client options, currently only time zone setting (`_TSDB_OPTIONTIMEZONE`) and encoding setting (`_TSDB_OPTIONLOCALE`) are supported. The time zone and encoding default to the current operating system settings. +Set client options, currently only time zone setting (_TSDB_OPTIONTIMEZONE) and encoding setting (_TSDB_OPTIONLOCALE) are supported. The time zone and encoding default to the current operating system settings. - `char *taos_get_client_info()` @@ -296,7 +296,9 @@ Asynchronous APIs have relatively high requirements for users, who can selective The asynchronous APIs of TDengine all use non-blocking calling mode.
Applications can use multithreading to open multiple tables at the same time, and can query or insert to each open table at the same time. It should be pointed out that the **application client must ensure that the operation on the same table is completely serialized**, that is, when the insertion or query operation on the same table is not completed (when no result returned), the second insertion or query operation cannot be performed. + + ### Parameter binding API In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows: @@ -821,12 +823,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html TDengine provides the GO driver taosSql. taosSql implements the GO language's built-in interface database/sql/driver. Users can access TDengine in the application by simply importing the package as follows, see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details. -Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go . +Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html). ```Go import ( "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/taosSql" ) ``` @@ -837,8 +839,6 @@ go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.io,direct ``` -`taosSql` v2 completed refactoring of the v1 version and separated the built-in database operation interface `database/sql/driver` to the directory `taosSql`, and put other advanced functions such as subscription and stmt into the directory `af`. - ### Common APIs - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` @@ -937,7 +937,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh Steps: -1. Create a new installation verification directory, for example: `~/tdengine-test`, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). +1. Create a new installation verification directory, for example: ~/tdengine-test, copy the nodejsChecker.js source program from GitHub. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). 2. Execute the following command: diff --git a/documentation20/en/09.connections/docs.md index b693d228cf..19544af0fa 100644 --- a/documentation20/en/09.connections/docs.md +++ b/documentation20/en/09.connections/docs.md @@ -2,7 +2,7 @@ ## Grafana -TDengine can quickly integrate with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require any code to write. The contents of the data table in TDengine can be visually showed on DashBoard. +TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarming system. The whole process does not require writing any code. The contents of the data tables in TDengine can be visually shown on a dashboard.
### Install Grafana diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md index 05d0a463aa..c0623f43f7 100644 --- a/documentation20/en/10.cluster/docs.md +++ b/documentation20/en/10.cluster/docs.md @@ -1,8 +1,8 @@ # TDengine Cluster Management -Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node function. +Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to ensure the highly reliable operation of TDengine and provide scale-out features. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering. Please refer to the chapter "Overall Architecture of TDengine 2.0". And before installing the cluster, please follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single node TDengine. -Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of this node. +Each data node of the cluster is uniquely identified by End Point, which is composed of FQDN (Fully Qualified Domain Name) plus Port, such as [h1.taosdata.com](http://h1.taosdata.com/):6030. The general FQDN is the hostname of the server, which can be obtained through the Linux command `hostname -f` (how to configure FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). Port is the external service port number of this data node. The default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames, and TDengine will automatically get the first one, but it can also be specified through the configuration parameter `fqdn` in taos.cfg. If you want to access via direct IP address, you can set the parameter `fqdn` to the IP address of this node. The cluster management of TDengine is extremely simple. Except for manual intervention in adding and deleting nodes, all other tasks are completed automatically, thus minimizing the workload of operation. This chapter describes the operations of cluster management in detail. 
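As a quick orientation (a sketch only; the exact statements and their effects are described in the sections below, and the End Point shown here is hypothetical), data nodes are managed from the TDengine CLI with statements such as:

```mysql
-- List the data nodes currently in the cluster, with their End Points.
SHOW DNODES;
-- Add a new data node to the cluster by its End Point (FQDN:port).
CREATE DNODE "h2.taosdata.com:6030";
```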
@@ -12,11 +12,11 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19 **Step 0:** Plan FQDN of all physical nodes in the cluster, and add the planned FQDN to /etc/hostname of each physical node respectively; modify the /etc/hosts of each physical node, and add the corresponding IP and FQDN of all cluster physical nodes. [If DNS is deployed, contact your network administrator to configure it on DNS] -**Step 1:** If the physical nodes have previous test data, installed with version 1. x, or installed with other versions of TDengine, please delete it first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)" +**Step 1:** If the physical nodes have previous test data, are installed with version 1.x, or are installed with other versions of TDengine, please back up all data first, then delete the old installation and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)" **Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`) on the premise of ensuring that the data is useless or backed up; -**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or modify hosts file. +**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through the DNS service or the hosts file. **Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that the TCP and UDP ports of ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built; @@ -136,7 +136,7 @@ Execute the CLI program taos, log in to the TDengine system using the root accou DROP DNODE "fqdn:port"; ``` -Where fqdn is the FQDN of the deleted node, and port is the port number of its external server. +Where fqdn is the FQDN of the deleted node, and port is the port number. **【Note】** @@ -185,7 +185,7 @@ Because of the introduction of vnode, it is impossible to simply draw a conclusi TDengine cluster is managed by mnode (a module of taosd, management node). In order to ensure the high-availability of mnode, multiple mnode replicas can be configured. The number of replicas is determined by system configuration parameter numOfMnodes, and the effective range is 1-3. In order to ensure the strong consistency of metadata, mnode replicas are duplicated synchronously. -A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically specified by the system according to the resource situation on the whole. User can execute the following command in the console of TDengine through the CLI program taos: +A cluster has multiple data node dnodes, but a dnode runs at most one mnode instance. In the case of multiple dnodes, which dnode can be used as an mnode? This is automatically selected by the system based on the overall resource situation.
Users can execute the following command in the console of TDengine through the CLI program taos: ``` SHOW MNODES; ``` @@ -213,7 +213,7 @@ When the above three situations occur, the system will start a load computing of If a data node is offline, the TDengine cluster will automatically detect it. There are two detailed situations: -- If the data node is offline for more than a certain period of time (configuration parameter offlineThreshold in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node is online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again. +- If the data node is offline for more than a certain period of time (configuration parameter `offlineThreshold` in taos.cfg controls the duration), the system will automatically delete the data node, generate system alarm information and trigger the load balancing process. If the deleted data node comes online again, it will not be able to join the cluster, and the system administrator will need to add it to the cluster again. - If the node goes offline but comes online again within the duration of offlineThreshold, the system will automatically start the data recovery process. After the data is fully recovered, the node will start to work normally. **Note:** If any data node belonging to a virtual node group (including the mnode group) is in offline or unsynced state, a Master can only be elected after all data nodes in the virtual node group are online and can exchange status information; only then can the virtual node group serve externally. For example, the whole cluster has 3 data nodes with 3 replicas. If all 3 data nodes go down and then 2 data nodes restart, it will not work. Only when all 3 data nodes restart successfully can the cluster serve externally again. @@ -229,7 +229,7 @@ The name of the executable for Arbitrator is tarbitrator. The executable has alm 1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install. -2. The command line parameter -p of this application can specify the port number of its external service, and the default is 6042. +2. The command line parameter -p of this application can specify the port number of its service, and the default is 6042. 3. Modify the configuration file of each taosd instance, and set parameter arbitrator to the End Point corresponding to the tarbitrator in taos.cfg. (If this parameter is configured, when the number of replicas is even, the system will automatically connect the configured Arbitrator. If the number of replicas is odd, even if the Arbitrator is configured, the system will not establish a connection.) 4. The Arbitrator configured in the configuration file will appear in the return result of instruction `SHOW DNODES`; the value of the corresponding role column will be "arb". diff --git a/documentation20/en/11.administrator/docs.md index 3817a41766..5dc1f9c7a9 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -22,8 +22,8 @@ If there is plenty of memory, the configuration of Blocks can be increased so th CPU requirements depend on the following two aspects: -- **Data insertion** TDengine single core can handle at least 10,000 insertion requests per second.
Each insertion request can take multiple records, and inserting one record at a time is almost the same as inserting 10 records in computing resources consuming. Therefore, the larger the number of inserts, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches. -- **Query requirements** TDengine to provide efficient queries, but the queries in each scenario vary greatly and the query frequency too, making it difficult to give objective figures. Users need to write some query statements for their own scenes to determine. +- **Data insertion**: a single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can carry multiple records, and inserting one record at a time consumes almost the same computing resources as inserting 10 records. Therefore, the larger the number of records per insert, the higher the insertion efficiency. If an insert request has more than 200 records, a single core can insert 1 million records per second. However, the faster the insertion speed, the higher the requirement for front-end data collection, because records need to be cached and then inserted in batches. +- **Query**: TDengine provides efficient queries, but the queries in each scenario vary greatly, and so does the query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to estimate. Therefore, only for data insertion, CPU can be estimated, but the computing resources consumed by query cannot be that clear. In the actual operation, it is not recommended to make CPU utilization rate over 50%. After that, new nodes need to be added to bring more computing resources. ## Server-side Configuration -The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter -c from the taosd command line. Such as taosd-c/home/user, to specify that the configuration file is located in the /home/user directory. +The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified with the parameter `-c` on the taosd command line. For example, `taosd -c /home/user` specifies that the configuration file is located in the /home/user directory. You can also use `-C` to show the current server configuration parameters: ```bash taosd -C ``` Only some important configuration parameters are listed below. For more parameters, please refer to the instructions in the configuration file. Please refer to the previous chapters for the detailed introduction and function of each parameter; the defaults of these parameters generally work and do not need to be changed.
**Note: After the configuration is modified, \*taosd service\* needs to be restarted to take effect.** -- firstEp: end point of the first dnode in the actively connected cluster when taosd starts, the default value is localhost: 6030. -- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node. +- firstEp: end point of the first dnode which will be connected in the cluster when taosd starts, the default value is localhost:6030. +- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you want to access via IP address directly, you can set it to the IP address of the node. - serverPort: the port number of the external service after taosd started, the default value is 6030. - httpPort: the port number used by the RESTful service; all HTTP query/write requests (TCP) are sent to this port. The default value is 6041. - dataDir: the data file directory to which all data files will be written. Default: /var/lib/taos. - logDir: the log file directory to which the running log files of the client and server will be written. Default: /var/log/taos. -- arbitrator: the end point of the arbiter in the system; the default value is null. -- role: optional role for dnode. 0-any; it can be used as an mnode and to allocate vnodes; 1-mgmt; It can only be an mnode, but not to allocate vnodes; 2-dnode; caannot be an mnode, only vnode can be allocated +- arbitrator: the end point of the arbitrator in the system; the default value is null. +- role: optional role for dnode. 0-any: it can be used as an mnode and to allocate vnodes; 1-mgmt: it can only be an mnode, and cannot allocate vnodes; 2-dnode: it cannot be an mnode, and only vnodes can be allocated - debugFlag: log switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default value: 131 or 135 (different modules have different default values). - numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines. - logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days. @@ -161,18 +161,18 @@ For example: ## Client Configuration -The foreground interactive client application of TDengine system is taos and application driver, which shares the same configuration file taos.cfg with taosd. When running taos, use the parameter -c to specify the configuration file directory, such as taos-c/home/cfg, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos. For more information on how to use taos, see the help information taos --help. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg. +The foreground interactive client application of TDengine system is taos and application driver, which shares the same configuration file taos.cfg with taosd. When running taos, use the parameter `-c` to specify the configuration file directory, such as `taos -c /home/cfg`, which means using the parameters in the taos.cfg configuration file under the /home/cfg/ directory. The default directory is /etc/taos.
For more information on how to use taos, see the help information `taos --help`. This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg. **Versions after 2.0.10.0 support the following parameters on the command line to display the current client configuration parameters:** ```bash -taos -C 或 taos --dump-config +taos -C or taos --dump-config ``` Client configuration parameters: - firstEp: end point of the first taosd instance in the actively connected cluster when taos is started, the default value is localhost:6030. -- secondEp: when taos starts, if not impossible to connect to firstEp, it will try to connect to secondEp. +- secondEp: when taos starts, if unable to connect to firstEp, it will try to connect to secondEp. - locale: default value obtained dynamically from the system. If the automatic acquisition fails, the user needs to set it in the configuration file or through the API. @@ -493,4 +493,4 @@ At the moment, TDengine has nearly 200 internal reserved keywords, which cannot | CONCAT | GLOB | METRICS | SET | VIEW | | CONFIGS | GRANTS | MIN | SHOW | WAVG | | CONFLICT | GROUP | MINUS | SLASH | WHERE | -| CONNECTION | | | | | +| CONNECTION | | | | | \ No newline at end of file diff --git a/documentation20/en/12.taos-sql/docs.md index dfa1742c99..680a3854b6 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -1,8 +1,8 @@ # TAOS SQL -TDengine provides a SQL-style language, TAOS SQL, to insert or query data, and support other common tips. To finish this document, you should have some understanding about SQL. +TDengine provides a SQL-style language, TAOS SQL, to insert or query data. To read through this document, you should have some basic understanding of SQL. -TAOS SQL is the main tool for users to write and query data to TDengine. TAOS SQL provides a style and mode similar to standard SQL to facilitate users to get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide deletion function for temporal structured data, the relevant function of data deletion is non-existent in TAO SQL. +TAOS SQL is the main way for users to write data to and query data from TDengine. TAOS SQL is similar to standard SQL to help users get started quickly. Strictly speaking, TAOS SQL is not and does not attempt to provide SQL standard syntax. In addition, since TDengine does not provide a deletion function for time-series data, the relevant function of data deletion is non-existent in TAOS SQL. Let’s take a look at the conventions used for syntax descriptions. @@ -127,7 +127,7 @@ Note: ALTER DATABASE db_name CACHELAST 0; ``` CACHELAST parameter controls whether last_row of the data subtable is cached in memory. The default value is 0, and the value range is [0, 1]. Where 0 means not enabled and 1 means enabled. (supported from version 2.0.11) - + **Tips**: After all the above parameters are modified, show databases can be used to confirm whether the modification is successful. - **Show all databases in system** @@ -138,14 +138,17 @@ Note: ## Table Management -- Create a table -Note: +- **Create a table** -1. The first field must be a timestamp, and system will set it as the primary key; -2. The max length of table name is 192; -3. The length of each row of the table cannot exceed 16k characters; -4.
Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers -5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes; + ```mysql + CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]); + ``` + Note: + 1. The first field must be a timestamp, and the system will set it as the primary key; + 2. The max length of table name is 192; + 3. The length of each row of the table cannot exceed 16k characters; + 4. Sub-table names can only consist of letters, numbers, and underscores, and cannot begin with numbers; + 5. If the data type binary or nchar is used, the maximum number of bytes should be specified, such as binary (20), which means 20 bytes; - **Create a table via STable** @@ -171,10 +174,10 @@ Note: Note: 1. The method of batch creating tables requires that the data table must use STable as a template. - 2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement should be controlled between 1000 and 3000, which will obtain an ideal speed of table building. + 2. On the premise of not exceeding the length limit of SQL statements, it is suggested that the number of tables in a single statement should be controlled between 1000 and 3000, which will obtain an ideal speed of table creation. - **Drop a table** ```mysql DROP TABLE [IF EXISTS] tb_name; ``` @@ -218,7 +221,7 @@ Note: ## STable Management -Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That is, in the instruction description later in this section, the three instructions of CREATE, DROP and ALTER need to write TABLE instead of STABLE in the old version as the reserved word. +Note: The STABLE reserved word is supported in 2.0.15.0 and later versions. In versions before that, the CREATE, DROP and ALTER instructions described later in this section must use TABLE instead of STABLE as the reserved word. - **Create a STable** @@ -290,7 +293,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Modify a tag name of a STable. After modification, all sub-tables under the STable will automatically use the new tag name. - **Modify a tag value of sub-table** ```mysql ALTER TABLE tb_name SET TAG tag_name=new_tag_value; ``` @@ -306,7 +309,7 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Insert a record into table tb_name. - **Insert a record with data corresponding to a given column** ```mysql INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...); ``` @@ -320,14 +323,14 @@ Note: In 2.0. 15.0 and later versions, STABLE reserved words are supported. That Insert multiple records into table tb_name. - **Insert multiple records into a given column** ```mysql INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...; ``` Insert multiple records into a given column of table tb_name. - **Insert multiple records into multiple tables** ```mysql INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ... tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...; @@ -421,7 +424,7 @@ taos> SELECT * FROM d1001; Query OK, 3 row(s) in set (0.001165s) ``` -For Stables, wildcards contain *tag columns*. +For STables, wildcards contain *tag columns*.
```mysql taos> SELECT * FROM meters; @@ -720,7 +723,7 @@ TDengine supports aggregations over data, they are listed below: ================================================ 9 | 9 | Query OK, 1 row(s) in set (0.004475s) - + taos> SELECT COUNT(*), COUNT(voltage) FROM d1001; count(*) | count(voltage) | ================================================ @@ -758,7 +761,7 @@ TDengine supports aggregations over data, they are listed below: ``` - **TWA** - + ```mysql SELECT TWA(field_name) FROM tb_name WHERE clause; ``` @@ -799,7 +802,7 @@ TDengine supports aggregations over data, they are listed below: ================================================================================ 35.200000763 | 658 | 0.950000018 | Query OK, 1 row(s) in set (0.000980s) - ``` + ``` - **STDDEV** @@ -896,7 +899,7 @@ TDengine supports aggregations over data, they are listed below: ====================================== 13.40000 | 223 | Query OK, 1 row(s) in set (0.001123s) - + taos> SELECT MAX(current), MAX(voltage) FROM d1001; max(current) | max(voltage) | ====================================== @@ -937,8 +940,6 @@ TDengine supports aggregations over data, they are listed below: Query OK, 1 row(s) in set (0.001023s) ``` -- - - **LAST** ```mysql @@ -972,7 +973,7 @@ TDengine supports aggregations over data, they are listed below: ``` - **TOP** - + ```mysql SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` @@ -1029,7 +1030,7 @@ TDengine supports aggregations over data, they are listed below: 2018-10-03 14:38:15.000 | 218 | 2018-10-03 14:38:16.650 | 218 | Query OK, 2 row(s) in set (0.001332s) - + taos> SELECT BOTTOM(current, 2) FROM d1001; ts | bottom(current, 2) | ================================================= @@ -1092,7 +1093,7 @@ TDengine supports aggregations over data, they are listed below: ======================= 12.30000 | Query OK, 1 row(s) in set (0.001238s) - + taos> SELECT LAST_ROW(current) FROM d1002; last_row(current) | ======================= @@ -1146,7 +1147,7 @@ TDengine supports aggregations over data, they are listed below: ============================ 5.000000000 | Query OK, 1 row(s) in set (0.001792s) - + taos> SELECT SPREAD(voltage) FROM d1001; spread(voltage) | ============================ @@ -1172,7 +1173,7 @@ TDengine supports aggregations over data, they are listed below: ## Time-dimension Aggregation -TDengine supports aggregating by intervals. Data in a table can partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sample operation, and the syntax is as follows: +TDengine supports aggregating by intervals (time range). Data in a table can be partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sampling operations, and the syntax is as follows: ```mysql SELECT function_list FROM tb_name @@ -1235,12 +1236,12 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P **Restrictions on group by** -TAOS SQL supports group by operation on tags, tbnames and ordinary columns, required that only one column and whichhas less than 100,000 unique values. +TAOS SQL supports group by operation on tags, tbnames and ordinary columns, provided that only one column is used and it has less than 100,000 unique values.
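For example (a sketch using the `meters` STable and its `location` tag from the earlier examples in this document), grouping by a single tag column satisfies this restriction:

```mysql
-- One group per distinct value of the location tag.
SELECT AVG(voltage), COUNT(*) FROM meters GROUP BY location;
```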
**Restrictions on join operation** -TAOS SQL supports join columns of two tables by Primary Key timestamp between them, and does not support four operations after tables aggregated for the time being. +TAOS SQL supports joining columns of two tables through their Primary Key timestamps, and for the time being does not support the four arithmetic operations on aggregated results. **Availability of IS NOT NULL** -Is not null supports all types of columns. Non-null expression is < > "" and only applies to columns of non-numeric types. +IS NOT NULL supports all types of columns. The non-null expression <> "" only applies to columns of non-numeric types. \ No newline at end of file From a47f664cf265c6a11ecb90bc15454370bc04a8ef Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang <1296468573@qq.com> Date: Mon, 6 Sep 2021 14:25:20 +0800 Subject: [PATCH 57/71] [TD-6169]: windows dll client can not quit. (#7732) * [TD-6169]: windows dll client can not quit. * [TD-6169]: windows dll client can not quit. * [TD-6169]: windows dll client can not quit. * [TD-6169]: windows dll client can not quit. * [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 8 ++++---- src/util/src/tcache.c | 20 ++++++++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..8af340030c 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -122,6 +122,10 @@ int32_t tscAcquireRpc(const char *key, const char *user, const char *secretEncry void taos_init_imp(void) { char temp[128] = {0};
+ while(atomic_load_8(&pCacheObj->deleting) != 0) { + if (refreshWorkerNormalStopped) break; + if (refreshWorkerUnexpectedStopped) return; taosMsleep(50); } @@ -677,6 +681,12 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } +void taosCacheRefreshWorkerUnexpectedStopped(void) { + if(!refreshWorkerNormalStopped) { + refreshWorkerUnexpectedStopped=true; + } +} + void* taosCacheTimedRefresh(void *handle) { assert(pCacheArrayList != NULL); uDebug("cache refresh thread starts"); @@ -685,6 +695,7 @@ void* taosCacheTimedRefresh(void *handle) { const int32_t SLEEP_DURATION = 500; //500 ms int64_t count = 0; + atexit(taosCacheRefreshWorkerUnexpectedStopped); while(1) { taosMsleep(SLEEP_DURATION); @@ -749,6 +760,7 @@ void* taosCacheTimedRefresh(void *handle) { pCacheArrayList = NULL; pthread_mutex_destroy(&guard); + refreshWorkerNormalStopped=true; uDebug("cache refresh thread quits"); return NULL; @@ -763,6 +775,6 @@ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { doCacheRefresh(pCacheObj, now, fp); } -void taosStopCacheRefreshWorker() { - stopRefreshWorker = false; +void taosStopCacheRefreshWorker(void) { + stopRefreshWorker = true; } \ No newline at end of file From e8469ca511d6265fe0f71b2b51ed9524853be380 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 6 Sep 2021 15:43:02 +0800 Subject: [PATCH 58/71] [TD-6221]Perfect scene of concurrent inquery --- tests/pytest/concurrent_inquiry.py | 31 +++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 7af38c3b56..1bb2081d7f 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -23,7 +23,7 @@ import string from requests.auth import HTTPBasicAuth func_list=['avg','count','twa','sum','stddev','leastsquares','min', 'max','first','last','top','bottom','percentile','apercentile', -'last_row','diff','spread'] +'last_row','diff','spread','distinct'] condition_list=[ "where _c0 > now -10d ", 'interval(10s)', @@ -33,7 +33,7 @@ condition_list=[ 'fill(null)' ] -where_list = ['_c0>now-10d',' <50','like',' is null'] +where_list = ['_c0>now-10d',' <50','like',' is null','in'] class ConcurrentInquiry: # def __init__(self,ts=1500000001000,host='127.0.0.1',user='root',password='taosdata',dbname='test', # stb_prefix='st',subtb_prefix='t',n_Therads=10,r_Therads=10,probabilities=0.05,loop=5, @@ -152,6 +152,20 @@ class ConcurrentInquiry: elif 'is null' in c: conlist = ' ' + random.choice(tlist) + random.choice([' is null',' is not null']) l.append(conlist) + elif 'in' in c: + in_list = [] + temp = [] + for i in range(random.randint(0,100)): + temp.append(random.randint(-10000,10000)) + temp = (str(i) for i in temp) + in_list.append(temp) + temp1 = [] + for i in range(random.randint(0,100)): + temp1.append("'" + ''.join(random.sample(string.ascii_letters, random.randint(0,10))) + "'") + in_list.append(temp1) + in_list.append(['NULL','NULL']) + conlist = ' ' + random.choice(tlist) + ' in (' + ','.join(random.choice(in_list)) + ')' + l.append(conlist) else: s_all = string.ascii_letters conlist = ' ' + random.choice(tlist) + " like \'%" + random.choice(s_all) + "%\' " @@ -182,7 +196,14 @@ class ConcurrentInquiry: def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) - + + def con_state_window(self,tlist,col_list,tag_list): + return 'state_window(' + random.choice(tlist + 
tag_list) + ')' + + def con_session_window(self,tlist,col_list,tag_list): + session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')' + return session_window + def gen_subquery_sql(self): subsql ,col_num = self.gen_query_sql(1) if col_num == 0: @@ -221,7 +242,7 @@ class ConcurrentInquiry: else: sql=sql+','.join(sel_col_list) #select col & func sql = sql + ' from ('+ subsql +') ' - con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: @@ -281,7 +302,7 @@ class ConcurrentInquiry: sql = sql + ' from '+random.choice(self.subtb_list)+' ' else: sql = sql + ' from '+random.choice(self.stb_list)+' ' - con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: From d31c70337b146d44397805a5e002464aa6cd5ab6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 6 Sep 2021 16:22:03 +0800 Subject: [PATCH 59/71] [td-255] code refactor. --- src/client/inc/tscUtil.h | 1 + src/client/src/tscSubquery.c | 122 +++++++++++++++-------------------- src/client/src/tscUtil.c | 27 +++++--- 3 files changed, 69 insertions(+), 81 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index c59ec3e624..ebd5de1ab3 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -318,6 +318,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex, SSqlC int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid); int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId); +int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries); void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index e3bec2c2ea..edc3dbfc82 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2459,11 +2459,48 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) { tfree(p); } +static void doConcurrentlySendSubQueries(SSqlObj* pSql) { + SSubqueryState *pState = &pSql->subState; + + // concurrently sent the query requests. 
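+  // Split the pState->numOfSub subqueries into scheduler tasks, each
+  // sending one contiguous [first, second) range of at most
+  // MAX_REQUEST_PER_TASK query requests.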
+ const int32_t MAX_REQUEST_PER_TASK = 8; + + int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; + assert(numOfTasks >= 1); + + int32_t num; + if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) { + num = MAX_REQUEST_PER_TASK; + } else { + num = pState->numOfSub / numOfTasks + 1; + } + tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks); + + for(int32_t j = 0; j < numOfTasks; ++j) { + SSchedMsg schedMsg = {0}; + schedMsg.fp = doSendQueryReqs; + schedMsg.ahandle = (void*)pSql; + + schedMsg.thandle = NULL; + SPair* p = calloc(1, sizeof(SPair)); + p->first = j * num; + + if (j == numOfTasks - 1) { + p->second = pState->numOfSub; + } else { + p->second = (j + 1) * num; + } + + schedMsg.msg = p; + taosScheduleTask(tscQhandle, &schedMsg); + } +} + int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SSqlCmd *pCmd = &pSql->cmd; - // pRes->code check only serves in launching metric sub-queries + // pRes->code check only serves in launching super table sub-queries if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) { pCmd->command = TSDB_SQL_RETRIEVE_GLOBALMERGE; // enable the abort of kill super table function. return pRes->code; @@ -2474,22 +2511,23 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pRes->qId = 0x1; // hack the qhandle check - const uint32_t nBufferSize = (1u << 18u); // 256KB + const uint32_t nBufferSize = (1u << 18u); // 256KB, default buffer size SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + SSubqueryState *pState = &pSql->subState; - pState->numOfSub = 0; - if (pTableMetaInfo->pVgroupTables == NULL) { - pState->numOfSub = pTableMetaInfo->vgroupList->numOfVgroups; - } else { - pState->numOfSub = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + int32_t numOfSub = (pTableMetaInfo->pVgroupTables == NULL) ? 
pTableMetaInfo->vgroupList->numOfVgroups + : (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + + int32_t ret = doInitSubState(pSql, numOfSub); + if (ret != 0) { + tscAsyncResultOnError(pSql); + return ret; } - assert(pState->numOfSub > 0); - - int32_t ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); + ret = tscCreateGlobalMergerEnv(pQueryInfo, &pMemoryBuf, pSql->subState.numOfSub, &pDesc, nBufferSize, pSql->self); if (ret != 0) { pRes->code = ret; tscAsyncResultOnError(pSql); @@ -2499,32 +2537,6 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { } tscDebug("0x%"PRIx64" retrieved query data from %d vnode(s)", pSql->self, pState->numOfSub); - pSql->pSubs = calloc(pState->numOfSub, POINTER_BYTES); - if (pSql->pSubs == NULL) { - tfree(pSql->pSubs); - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); - - tscAsyncResultOnError(pSql); - return ret; - } - - if (pState->states == NULL) { - pState->states = calloc(pState->numOfSub, sizeof(*pState->states)); - if (pState->states == NULL) { - pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; - tscDestroyGlobalMergerEnv(pMemoryBuf, pDesc,pState->numOfSub); - - tscAsyncResultOnError(pSql); - return ret; - } - - pthread_mutex_init(&pState->mutex, NULL); - } - - memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub); - tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self); - pRes->code = TSDB_CODE_SUCCESS; int32_t i = 0; @@ -2545,8 +2557,8 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { break; } - trs->subqueryIndex = i; - trs->pParentSql = pSql; + trs->subqueryIndex = i; + trs->pParentSql = pSql; SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL); if (pNew == NULL) { @@ -2582,39 +2594,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return pRes->code; } - // concurrently sent the query requests. 
- const int32_t MAX_REQUEST_PER_TASK = 8; - - int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; - assert(numOfTasks >= 1); - - int32_t num; - if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) { - num = MAX_REQUEST_PER_TASK; - } else { - num = pState->numOfSub / numOfTasks + 1; - } - tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks); - - for(int32_t j = 0; j < numOfTasks; ++j) { - SSchedMsg schedMsg = {0}; - schedMsg.fp = doSendQueryReqs; - schedMsg.ahandle = (void*)pSql; - - schedMsg.thandle = NULL; - SPair* p = calloc(1, sizeof(SPair)); - p->first = j * num; - - if (j == numOfTasks - 1) { - p->second = pState->numOfSub; - } else { - p->second = (j + 1) * num; - } - - schedMsg.msg = p; - taosScheduleTask(tscQhandle, &schedMsg); - } - + doConcurrentlySendSubQueries(pSql); return TSDB_CODE_SUCCESS; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 31631560af..ef63852c9a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3944,6 +3944,21 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { taos_fetch_rows_a(tres, tscSubqueryRetrieveCallback, param); } +int32_t doInitSubState(SSqlObj* pSql, int32_t numOfSubqueries) { + assert(pSql->subState.numOfSub == 0 && pSql->pSubs == NULL && pSql->subState.states == NULL); + pSql->subState.numOfSub = numOfSubqueries; + + pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); + + int32_t code = pthread_mutex_init(&pSql->subState.mutex, NULL); + if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != 0) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + return TSDB_CODE_SUCCESS; +} + // do execute the query according to the query execution plan void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { int32_t code = TSDB_CODE_SUCCESS; @@ -3959,16 +3974,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly - assert(pSql->subState.numOfSub == 0); - pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - assert(pSql->pSubs == NULL); - pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); - assert(pSql->subState.states == NULL); - pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); - code = pthread_mutex_init(&pSql->subState.mutex, NULL); - - if (pSql->pSubs == NULL || pSql->subState.states == NULL || code != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_TSC_OUT_OF_MEMORY; + code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream)); + if (code != TSDB_CODE_SUCCESS) { goto _error; } From 1f15dda4adc05685292a09554a6fa055bb3a7546 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 6 Sep 2021 16:49:52 +0800 Subject: [PATCH 60/71] [ts-218] fix the invalid write. 
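In taosArrayRemoveBatch, the tail shift after a batch removal used a copy length of `pArray->size - numOfElems`, which reads and writes past the end of the array whenever the removed run does not start at index 0; the fix copies `pArray->size - srcIndex` elements and skips the memmove entirely when the run ends at the tail. Below is a standalone sketch of the corrected shift; the simplified signature and the helper name `removeRun` are illustrative assumptions, not the library code.

```c
#include <string.h>
#include <stddef.h>

/* Sketch: drop a run of n elements ending at index `last` from an array of
 * `size` elements by shifting the kept tail left. The copy length must be
 * (size - srcIndex); copying (size - n) elements overruns the buffer
 * whenever the removed run does not start at index 0. */
static size_t removeRun(char *base, size_t elemSize, size_t size,
                        size_t last, size_t n) {
  size_t srcIndex = last + 1;      /* first kept element after the run */
  size_t dstIndex = last + 1 - n;  /* slot where the kept tail lands   */
  if (size > srcIndex) {           /* nothing to move if run is at the tail */
    memmove(base + dstIndex * elemSize, base + srcIndex * elemSize,
            elemSize * (size - srcIndex));
  }
  return size - n;                 /* new logical size */
}
```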
--- src/util/src/tarray.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index d0d126c1e4..2d6c513cb5 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -112,14 +112,15 @@ void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfEle i += 1; } - assert(i == pData[numOfElems - 1] + 1); + assert(i == pData[numOfElems - 1] + 1 && i <= size); - int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1; int32_t srcIndex = pData[numOfElems - 1] + 1; - - char* dst = TARRAY_GET_ELEM(pArray, dstIndex); - char* src = TARRAY_GET_ELEM(pArray, srcIndex); - memmove(dst, src, pArray->elemSize * (pArray->size - numOfElems)); + int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1; + if (pArray->size - srcIndex > 0) { + char* dst = TARRAY_GET_ELEM(pArray, dstIndex); + char* src = TARRAY_GET_ELEM(pArray, srcIndex); + memmove(dst, src, pArray->elemSize * (pArray->size - srcIndex)); + } pArray->size -= numOfElems; } From b4ed1124a4dbb1456057eac59846a23f7ace0233 Mon Sep 17 00:00:00 2001 From: tinybright <774994940@qq.com> Date: Mon, 6 Sep 2021 16:52:50 +0800 Subject: [PATCH 61/71] Update docs.md removing log in as root.adding use database. --- documentation20/cn/10.cluster/docs.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index b55dbc6944..0565e1ee5b 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -121,7 +121,7 @@ taos> ### 添加数据节点 -执行CLI程序taos,使用root账号登录进系统,执行: +执行CLI程序taos,执行: ``` CREATE DNODE "fqdn:port"; @@ -131,7 +131,7 @@ CREATE DNODE "fqdn:port"; ### 删除数据节点 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql DROP DNODE "fqdn:port | dnodeID"; @@ -153,7 +153,7 @@ DROP DNODE "fqdn:port | dnodeID"; 手动将某个vnode迁移到指定的dnode。 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql ALTER DNODE BALANCE "VNODE:-DNODE:"; @@ -169,7 +169,7 @@ ALTER DNODE BALANCE "VNODE:-DNODE:"; ### 查看数据节点 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql SHOW DNODES; ``` @@ -180,8 +180,9 @@ SHOW DNODES; 为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: +执行CLI程序taos,执行: ```mysql +USE SOME_DATABASE; SHOW VGROUPS; ``` From e2e4487fc2fec86bb90445a69703a060833b3cee Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 6 Sep 2021 18:24:18 +0800 Subject: [PATCH 62/71] [td-255] fix bug found by regression test. --- src/client/src/tscUtil.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ef63852c9a..fe3e330aa9 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4322,7 +4322,9 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { } tfree(pSql->pSubs); + tfree(pSql->subState.states); pSql->subState.numOfSub = 0; + pthread_mutex_destroy(&pSql->subState.mutex); pSql->fp = fp; From f2e6a3d982fbaa1c0df81445c07bb1c58354da32 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 7 Sep 2021 10:00:47 +0800 Subject: [PATCH 63/71] Hotfix/sangshuduo/td 2936 change mac default path (#7799) * change mac default path back after homebrew install support merged. * fix lib/log path install for mac * fix library link for mac. 
--- packaging/tools/make_install.sh | 36 +++++++++++++-------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 0849a76e31..d400d0b91a 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -20,44 +20,33 @@ fi # Dynamic directory - if [ "$osType" != "Darwin" ]; then data_dir="/var/lib/taos" log_dir="/var/log/taos" -else - data_dir="/usr/local/var/lib/taos" - log_dir="/usr/local/var/log/taos" -fi -if [ "$osType" != "Darwin" ]; then cfg_install_dir="/etc/taos" -else - cfg_install_dir="/usr/local/etc/taos" -fi -if [ "$osType" != "Darwin" ]; then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" lib64_link_dir="/usr/lib64" inc_link_dir="/usr/include" + + install_main_dir="/usr/local/taos" + + bin_dir="/usr/local/taos/bin" else + data_dir="/usr/local/var/lib/taos" + log_dir="/usr/local/var/log/taos" + + cfg_install_dir="/usr/local/etc/taos" + bin_link_dir="/usr/local/bin" lib_link_dir="/usr/local/lib" inc_link_dir="/usr/local/include" -fi -#install main path -if [ "$osType" != "Darwin" ]; then - install_main_dir="/usr/local/taos" -else install_main_dir="/usr/local/Cellar/tdengine/${verNumber}" -fi -# old bin dir -if [ "$osType" != "Darwin" ]; then - bin_dir="/usr/local/taos/bin" -else - bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" + bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin" fi service_config_dir="/etc/systemd/system" @@ -254,7 +243,10 @@ function install_lib() { ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so fi else - ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.${verNumber}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi install_jemalloc From b277d0dcbb9c277f4ee936ff650c67941446efc8 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 7 Sep 2021 11:11:36 +0800 Subject: [PATCH 64/71] fix typo in cmake --- cmake/version.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/version.inc b/cmake/version.inc index 261e3e8162..e2b581e39e 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -86,7 +86,7 @@ ENDIF () MESSAGE(STATUS "============= compile version parameter information start ============= ") MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER}) MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE}) -MESSAGE(STATUS "communit commit id:" ${TD_VER_GIT}) +MESSAGE(STATUS "communiy commit id:" ${TD_VER_GIT}) MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL}) MESSAGE(STATUS "build date:" ${TD_VER_DATE}) MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE}) From 732e2876c96f80e55fb11d835d5f670b912ff6b7 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 7 Sep 2021 11:13:07 +0800 Subject: [PATCH 65/71] fix typo --- cmake/version.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/version.inc b/cmake/version.inc index e2b581e39e..dfeb26454f 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -86,7 +86,7 @@ ENDIF () MESSAGE(STATUS "============= compile version parameter information start ============= ") MESSAGE(STATUS "ver number:" ${TD_VER_NUMBER}) MESSAGE(STATUS "compatible ver number:" ${TD_VER_COMPATIBLE}) -MESSAGE(STATUS 
"communiy commit id:" ${TD_VER_GIT}) +MESSAGE(STATUS "community commit id:" ${TD_VER_GIT}) MESSAGE(STATUS "internal commit id:" ${TD_VER_GIT_INTERNAL}) MESSAGE(STATUS "build date:" ${TD_VER_DATE}) MESSAGE(STATUS "ver type:" ${TD_VER_VERTYPE}) From c1a7b9fe20a5884a73847acad064c5c2127ac288 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 7 Sep 2021 11:43:36 +0800 Subject: [PATCH 66/71] [TD-4669] : fix some typo in English documents. --- documentation20/en/00.index/docs.md | 8 ++++---- documentation20/en/01.evaluation/docs.md | 2 +- documentation20/en/02.getting-started/docs.md | 4 +++- documentation20/en/03.architecture/docs.md | 20 +++++++++---------- documentation20/en/08.connector/docs.md | 10 +++++----- documentation20/en/10.cluster/docs.md | 2 +- documentation20/en/11.administrator/docs.md | 2 +- documentation20/en/12.taos-sql/docs.md | 2 +- 8 files changed, 26 insertions(+), 24 deletions(-) diff --git a/documentation20/en/00.index/docs.md b/documentation20/en/00.index/docs.md index 05c16aa3eb..258b2f718f 100644 --- a/documentation20/en/00.index/docs.md +++ b/documentation20/en/00.index/docs.md @@ -6,7 +6,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series * [TDengine Introduction and Features](/evaluation#intro) * [TDengine Use Scenes](/evaluation#scenes) -* [TDengine Performance Metrics and Verification]((/evaluation#)) +* [TDengine Performance Metrics and Verification](/evaluation#) ## [Getting Started](/getting-started) @@ -20,9 +20,9 @@ TDengine is a highly efficient platform to store, query, and analyze time-series ## [Overall Architecture](/architecture) - [Data Model](/architecture#model): relational database model, but one table for one data collection point with static tags -- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQ architecture, highly available and horizontal scalable +- [Cluster and Primary Logical Unit](/architecture#cluster): Take advantage of NoSQL architecture, high availability and horizontal scalability - [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, sharded by vnodes and partitioned by time -- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replications. 
+- [Data Writing and Replication Process](/architecture#replication): records received are written to WAL, cached, with acknowledgement sent back to client, while supporting data replications
- [Caching and Persistence](/architecture#persistence): latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
- [Data Query](/architecture#query): support various SQL functions, downsampling, interpolation, and multi-table aggregation
@@ -70,7 +70,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series

## [Connector](/connector)

- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
-- [Java Connector(JDBC)](https://www.taosdata.com/en/documentation20/connector/java): driver for connecting to the server from Java applications using the JDBC API
+- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
diff --git a/documentation20/en/01.evaluation/docs.md b/documentation20/en/01.evaluation/docs.md
index 58e30b2e1c..ecbde8c577 100644
--- a/documentation20/en/01.evaluation/docs.md
+++ b/documentation20/en/01.evaluation/docs.md
@@ -9,7 +9,7 @@ One of the modules of TDengine is the time-series database. However, in addition
- **Performance improvement over 10 times**: An innovative data storage structure is defined, with each single core able to process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, which is more than 10 times faster than other existing general databases.
- **Reduce the cost of hardware or cloud services to 1/5**: Due to its ultra-performance, TDengine’s computing resource consumption is less than 1/5 of other common Big Data solutions; through columnar storage and advanced compression algorithms, the storage consumption is less than 1/10 of other general databases.
- **Full-stack time-series data processing engine**: Integrate database, message queue, cache, stream computing, and other functions, and the applications do not need to integrate with software such as Kafka/Redis/HBase/Spark/HDFS, thus greatly reducing the complexity cost of application development and maintenance.
-- **Highly Available and Horizontal Scalabe **: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support the mission-critical applications.
+- **Highly Available and Horizontally Scalable**: With the distributed architecture and consistency algorithm, via multi-replication and clustering features, TDengine ensures high availability and horizontal scalability to support the mission-critical applications.
- **Zero operation cost & zero learning cost**: Installing clusters is simple and quick, with real-time backup built-in, and no need to split libraries or tables. Similar to standard SQL, TDengine can support RESTful, Python/Java/C/C++/C#/Go/Node.js, and similar to MySQL with zero learning cost.
- **Core is Open Sourced:** Except for some auxiliary features, the core of TDengine is open sourced. Enterprise won't be locked by the database anymore.
The ecosystem is stronger, the product is more stable, and the developer communities are more active.
diff --git a/documentation20/en/02.getting-started/docs.md b/documentation20/en/02.getting-started/docs.md
index 307ccadf9a..50a8c2fabb 100644
--- a/documentation20/en/02.getting-started/docs.md
+++ b/documentation20/en/02.getting-started/docs.md
@@ -10,7 +10,9 @@ Please visit our [TDengine github page](https://github.com/taosdata/TDengine) fo

### Install from Docker Container

-Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).
+For the time being, it is not recommended to use Docker to deploy the client or server side of TDengine in production environments, but it is convenient to use Docker to deploy in development environments or when trying it for the first time. In particular, with Docker, it is easy to try TDengine in Mac OS X and Windows environments.
+
+Please refer to the detailed operation in [Quickly experience TDengine through Docker](https://www.taosdata.com/en/documentation/getting-started/docker).

### Install from Package

diff --git a/documentation20/en/03.architecture/docs.md b/documentation20/en/03.architecture/docs.md
index b1d47684c3..20d41e63a9 100644
--- a/documentation20/en/03.architecture/docs.md
+++ b/documentation20/en/03.architecture/docs.md
@@ -243,7 +243,7 @@ The meta data of each table (including schema, tags, etc.) is also stored in vno

### Data Partitioning

-In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter “`days`”. This method of partitioning by time rang is also convenient to efficiently implement the data retention policy. As long as the data file exceeds the specified number of days (system configuration parameter "`keep`"), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate the tiered-storage. Cold/hot data can be stored in different storage meida to reduce the storage cost.
+In addition to vnode sharding, TDengine partitions the time-series data by time range. Each data file contains only one time range of time-series data, and the length of the time range is determined by DB's configuration parameter `days`. This method of partitioning by time range also makes it convenient to efficiently implement the data retention policy: as soon as a data file exceeds the specified number of days (system configuration parameter `keep`), it will be automatically deleted. Moreover, different time ranges can be stored in different paths and storage media, so as to facilitate tiered storage. Cold/hot data can be stored in different storage media to reduce the storage cost.

In general, **TDengine splits big data by vnode and time range in two dimensions** to manage the data efficiently with horizontal scalability.

@@ -251,7 +251,7 @@ Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so mnode knows the status of the entire cluster. Based on the overall status, when the mnode finds a dnode is overloaded, it will migrate one or more vnodes to other dnodes.
During the process, TDengine services keep running and the data insertion, query and computing operations are not affected.

-If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When offline lasts a certain period of time (configured by parameter ‘`offlineThreshold`’), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnodes replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.
+If the mnode has not received the dnode status for a period of time, the dnode will be treated as offline. When the offline state lasts a certain period of time (configured by parameter `offlineThreshold`), the dnode will be forcibly removed from the cluster by mnode. If the number of replicas of vnodes on this dnode is greater than one, the system will automatically create new replicas on other dnodes to ensure the replica number. If there are other mnodes on this dnode and the number of mnode replicas is greater than one, the system will automatically create new mnodes on other dnodes to ensure the replica number.

When new data nodes are added to the cluster, bringing new computing and storage resources, the system will automatically start the load balancing process.

@@ -268,7 +268,7 @@ Master Vnode uses a writing process as follows:

Figure 3: TDengine Master writing process

1. Master vnode receives the application data insertion request, verifies it, and moves to the next step;
-2. If the system configuration parameter “`walLevel`” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
+2. If the system configuration parameter `walLevel` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. If there are multiple replicas, vnode will forward the data packet to slave vnodes in the same virtual node group, and the forwarded packet carries a version number with the data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful write.

@@ -282,7 +282,7 @@ For a slave vnode, the write process is as follows:
Figure 4: TDengine Slave writing process
1. Slave vnode receives a data insertion request forwarded by Master vnode.
-2. If the system configuration parameter “walLevel” is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
+2. If the system configuration parameter `walLevel` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write the WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. Write into memory and add the record to “skip list”;

Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, which means two fewer steps. But writing into memory and WAL is exactly the same.

@@ -336,17 +336,17 @@ Each vnode has its own independent memory, and it is composed of multiple memory

TDengine uses a data-driven method to write the data from buffer into hard disk for persistent storage. When the cached data in vnode reaches a certain volume, TDengine will also pull up the disk-writing thread to write the cached data into persistent storage in order not to block subsequent data writing. TDengine will open a new database log file when the data is written, and delete the old database log file after the data is written successfully to avoid unlimited log growth.

-To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files, each file only saves data for a fixed number of days, which is determined by the system configuration parameter “`days`”. By so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.
+To make full use of the characteristics of time-series data, TDengine splits the data stored in persistent storage by a vnode into multiple files; each file only saves data for a fixed number of days, which is determined by the system configuration parameter `days`. By doing so, for the given start and end date of a query, you can locate the data files to open immediately without any index, thus greatly speeding up reading operations.

-For time-series data, there is generally a retention policy, which is determined by the system configuration parameter “`keep`”. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.
+For time-series data, there is generally a retention policy, which is determined by the system configuration parameter `keep`. Data files exceeding this set number of days will be automatically deleted by the system to free up storage space.

Given “days” and “keep” parameters, the total number of data files in a vnode is: keep/days. The total number of data files should not be too large or too small. 10 to 100 is appropriate. Based on this principle, a reasonable value of days can be set. In the current version, parameter “keep” can be modified, but parameter “days” cannot be modified once it is set.

-In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed.
The size of file block is determined by the system parameter “`maxRows`” (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, the data locating in search will cost longer; if too small, the index of data block is too large, and the compression efficiency will be low with slower reading speed.
+In each data file, the data of a table is stored by blocks. A table can have one or more data file blocks. In a file block, data is stored in columns, occupying a continuous storage space, thus greatly improving the reading speed. The size of a file block is determined by the system parameter `maxRows` (the maximum number of records per block), and the default value is 4096. This value should not be too large or too small. If it is too large, locating data during a search will take longer; if too small, the index of the data blocks becomes too large, and the compression efficiency will be low with slower reading speed.

-Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of a data block for each table, recording the offset of each data block in the data file, start and end time of data and other information, so as to lead system quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when written in disk. If the number of written records from a table does not reach the system configuration parameter “`minRows`” (minimum number of records per block), it will be stored in the last file first. When write to disk next time, the newly written records will be merged with the records in last file and then written into data file.
+Each data file (with a .data postfix) has a corresponding index file (with a .head postfix). The index file has summary information of each data block for each table, recording the offset of each data block in the data file, the start and end time of the data, and other information, so that the system can quickly locate the data to be found. Each data file also has a corresponding last file (with a .last postfix), which is designed to prevent data block fragmentation when writing to disk. If the number of written records from a table does not reach the system configuration parameter `minRows` (minimum number of records per block), they will be stored in the last file first. At the next disk write, the newly written records will be merged with the records in the last file and then written into the data file.

-When data is written to disk, it is decided whether to compress the data according to system configuration parameter “`comp`”. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively. One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio.
+When data is written to disk, it is decided whether to compress the data according to system configuration parameter `comp`. TDengine provides three compression options: no compression, one-stage compression and two-stage compression, corresponding to comp values of 0, 1 and 2 respectively.
One-stage compression is carried out according to the type of data. Compression algorithms include delta-delta coding, simple 8B method, zig-zag coding, LZ4 and other algorithms. Two-stage compression is based on one-stage compression and compressed by general compression algorithm, which has higher compression ratio. ### Tiered Storage @@ -395,7 +395,7 @@ When client obtains query result, the worker thread in query execution queue of The remarkable feature that time-series data is different from ordinary data is that each record has a timestamp, so aggregating data with timestamps on the time axis is an important and distinct feature from common databases. From this point of view, it is similar to the window query of stream computing engine. -The keyword “`interval`” is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example: +The keyword `interval` is introduced into TDengine to split fixed length time windows on time axis, and the data are aggregated based on time windows, and the data within window range are aggregated as needed. For example: ```mysql select count(*) from d1001 interval(1h); diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md index 9cbd395206..c111a47450 100644 --- a/documentation20/en/08.connector/docs.md +++ b/documentation20/en/08.connector/docs.md @@ -296,9 +296,7 @@ Asynchronous APIs have relatively high requirements for users, who can selective The asynchronous APIs of TDengine all use non-blocking calling mode. Applications can use multithreading to open multiple tables at the same time, and can query or insert to each open table at the same time. It should be pointed out that the **application client must ensure that the operation on the same table is completely serialized**, that is, when the insertion or query operation on the same table is not completed (when no result returned), the second insertion or query operation cannot be performed. - - ### Parameter binding API In addition to calling `taos_query` directly for queries, TDengine also provides a Prepare API that supports parameter binding. Like MySQL, these APIs currently only support using question mark `?` to represent the parameters to be bound, as follows: @@ -823,12 +821,12 @@ https://www.taosdata.com/blog/2020/11/02/1901.html The TDengine provides the GO driver taosSql. taosSql implements the GO language's built-in interface database/sql/driver. Users can access TDengine in the application by simply importing the package as follows, see https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go for details. -Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go and the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1951.html). +Sample code for using the Go connector can be found in https://github.com/taosdata/TDengine/tree/develop/tests/examples/go . ```Go import ( "database/sql" - _ "github.com/taosdata/driver-go/taosSql" + _ "github.com/taosdata/driver-go/v2/taosSql" ) ``` @@ -839,6 +837,8 @@ go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.io,direct ``` +`taosSql` v2 completed refactoring of the v1 version and separated the built-in database operation interface `database/sql/driver` to the directory `taosSql`, and put other advanced functions such as subscription and stmt into the directory `af`. 
+ ### Common APIs - `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` @@ -937,7 +937,7 @@ After installing the TDengine client, the nodejsChecker.js program can verify wh Steps: -1. Create a new installation verification directory, for example: ~/tdengine-test, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). +1. Create a new installation verification directory, for example: `~/tdengine-test`, copy the nodejsChecker.js source program on github. Download address: (https://github.com/taosdata/TDengine/tree/develop/tests/examples/nodejs/nodejsChecker.js). 2. Execute the following command: diff --git a/documentation20/en/10.cluster/docs.md b/documentation20/en/10.cluster/docs.md index c0623f43f7..864bc46200 100644 --- a/documentation20/en/10.cluster/docs.md +++ b/documentation20/en/10.cluster/docs.md @@ -16,7 +16,7 @@ Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/19 **Note 1:** Because the information of FQDN will be written into a file, if FQDN has not been configured or changed before, and TDengine has been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`)on the premise of ensuring that the data is useless or backed up; -**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or Host file. +**Note 2:** The client also needs to be configured to ensure that it can correctly parse the FQDN configuration of each node, whether through DNS service or modify hosts file. **Step 2:** It is recommended to close the firewall of all physical nodes, and at least ensure that the TCP and UDP ports of ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built; diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 5dc1f9c7a9..a2c2486b8e 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -78,7 +78,7 @@ When the nodes in TDengine cluster are deployed on different physical machines a ## Server-side Configuration -The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter `-c` from the taosd command line. Such as `taosd -c /home/user,` to specify that the configuration file is located in the /home/user directory. +The background service of TDengine system is provided by taosd, and the configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, which can be specified by executing the parameter `-c` from the taosd command line. Such as `taosd -c /home/user`, to specify that the configuration file is located in the /home/user directory. 
You can also use “-C” to show the current server configuration parameters:

diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md
index 680a3854b6..630fbd1cdb 100644
--- a/documentation20/en/12.taos-sql/docs.md
+++ b/documentation20/en/12.taos-sql/docs.md
@@ -1173,7 +1173,7 @@ TDengine supports aggregations over data, they are listed below:

## Time-dimension Aggregation

-TDengine supports aggregating by intervals(time range). Data in a table can partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down sample operation, and the syntax is as follows:
+TDengine supports aggregating by intervals (time range). Data in a table can be partitioned by intervals and aggregated to generate results. For example, a temperature sensor collects data once per second, but the average temperature needs to be queried every 10 minutes. This aggregation is suitable for down-sampling operations, and the syntax is as follows:

```mysql
SELECT function_list FROM tb_name
From 9d74b39539ede0e963d2a33b082febe24947dba7 Mon Sep 17 00:00:00 2001
From: jiajingbin <39030567+jiajingbin@users.noreply.github.com>
Date: Tue, 7 Sep 2021 13:43:18 +0800
Subject: [PATCH 67/71] Update docs.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

add:
**提示:**
- 以下所有执行命令的操作需要先登陆进TDengine系统,必要时请使用root权限。
---
documentation20/cn/10.cluster/docs.md | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md
index 0565e1ee5b..1f6f84dd1a 100644
--- a/documentation20/cn/10.cluster/docs.md
+++ b/documentation20/cn/10.cluster/docs.md
@@ -119,6 +119,11 @@ taos>
上面已经介绍如何从零开始搭建集群。集群组建完后,还可以随时添加新的数据节点进行扩容,或删除数据节点,并检查集群当前状态。
+
+**提示:**
+
+- 以下所有执行命令的操作需要先登陆进TDengine系统,必要时请使用root权限。
+
### 添加数据节点

执行CLI程序taos,执行:
From fba92f9301ed4a69188efb3a4141b95198bcb45c Mon Sep 17 00:00:00 2001
From: zhaoyanggh
Date: Tue, 7 Sep 2021 14:04:52 +0800
Subject: [PATCH 68/71] add asm notice in cmake

---
cmake/define.inc | 2 ++
1 file changed, 2 insertions(+)

diff --git a/cmake/define.inc b/cmake/define.inc
index 9ee09c86b0..337a143e1f 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -133,8 +133,10 @@ IF (TD_LINUX)

IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
+ MESSAGE(STATUS "memory sanitizer detected as true")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
+ MESSAGE(STATUS "memory sanitizer detected as false")
ENDIF ()

SET(RELEASE_FLAGS "-O3 -Wno-error")
From 27afdd01b6277cd312b66f21a421f67111125aad Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Tue, 7 Sep 2021 14:55:06 +0800
Subject: [PATCH 69/71] [TD-4798] : max columns extend to 4096.
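The documented per-table column cap rises from 1024 to 4096 starting with version 2.1.7.0, while the minimum stays at 2 columns with the first always the timestamp. A minimal sketch of how a schema generator might check a column count against these documented limits — the constants and helper below are illustrative assumptions, not TDengine definitions:

```c
#include <stdio.h>

/* Documented caps: 1024 columns before 2.1.7.0, 4096 from 2.1.7.0 onward.
 * Names below are assumptions for illustration only. */
#define MAX_COLUMNS_LEGACY 1024
#define MAX_COLUMNS_2_1_7  4096

static int schemaFits(int numOfColumns, int serverSupports4096) {
  int cap = serverSupports4096 ? MAX_COLUMNS_2_1_7 : MAX_COLUMNS_LEGACY;
  return numOfColumns >= 2 && numOfColumns <= cap; /* >= 2: timestamp + one field */
}

int main(void) {
  printf("2000 columns on 2.1.7.0+: %s\n", schemaFits(2000, 1) ? "ok" : "too wide");
  printf("2000 columns on older:    %s\n", schemaFits(2000, 0) ? "ok" : "too wide");
  return 0;
}
```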
--- documentation20/cn/04.model/docs.md | 2 +- documentation20/cn/11.administrator/docs.md | 4 ++-- documentation20/cn/12.taos-sql/docs.md | 6 +++--- documentation20/cn/13.faq/docs.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index 45a4537d9b..5869973737 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -43,7 +43,7 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG 每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。一张超级表里包含的采集物理量必须是同时采集的(时间戳是一致的)。 -一张超级表最多容许1024列,如果一个采集点采集的物理量个数超过1024,需要建多张超级表来处理。一个系统可以有多个DB,一个DB里可以有一到多个超级表。 +一张超级表最多容许 1024 列,如果一个采集点采集的物理量个数超过 1024,需要建多张超级表来处理。一个系统可以有多个 DB,一个 DB 里可以有一到多个超级表。(从 2.1.7.0 版本开始,列数限制由 1024 列放宽到了 4096 列。) ## 创建表 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 99953233f0..35eff03423 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -652,7 +652,7 @@ rmtaos - 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符 - 表的列名:不能包含特殊字符,不能超过 64 个字符 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” -- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳 +- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳(从 2.1.7.0 版本开始,改为最多支持 4096 列) - 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) - 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte - 数据库副本数:不能超过 3 @@ -665,7 +665,7 @@ rmtaos - 库的个数:仅受节点个数限制 - 单个库上虚拟节点个数:不能超过 64 个 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 -- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) 目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 4c9765b276..b96a9c3d28 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -233,7 +233,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ``` 说明: - 1) 列的最大个数为1024,最小个数为2; + 1) 列的最大个数为1024,最小个数为2;(从 2.1.7.0 版本开始,改为最多允许 4096 列) 2) 列名最大长度为64。 @@ -1458,10 +1458,10 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P - 数据库名最大长度为 32。 - 表名最大长度为 192,每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 -- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。 +- 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 标签名最大长度为 64,最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符。 - SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M。 -- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。(从 2.1.7.0 版本开始,改为最多允许 4096 列) - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制。 ## TAOS SQL 其他约定 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index d89b2adeb8..3d6f03b303 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -98,7 +98,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支 ## 10. 我怎么创建超过1024列的表? 
-使用2.0及其以上版本,默认支持1024列;2.0之前的版本,TDengine最大允许创建250列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。 +使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) ## 11. 最有效的写入数据的方法是什么? From be63fe34c55d42f27f523cf74f0576999cc0fbf0 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 7 Sep 2021 13:37:12 +0800 Subject: [PATCH 70/71] update version case --- tests/pytest/client/version.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py index 7cbeeb60df..ca6ce69780 100644 --- a/tests/pytest/client/version.py +++ b/tests/pytest/client/version.py @@ -15,6 +15,7 @@ import sys from util.log import * from util.cases import * from util.sql import * +from math import floor class TDTestCase: @@ -37,13 +38,12 @@ class TDTestCase: sql = "select client_version()" ret = tdSql.query(sql) - version = tdSql.getData(0, 0)[0:3] - expectedVersion_dev = "2.0" - expectedVersion_master = "2.1" - if(version == expectedVersion_dev or version == expectedVersion_master): + version = floor(float(tdSql.getData(0, 0)[0:3])) + expectedVersion = 2 + if(version == expectedVersion): tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) + tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion)) def stop(self): From 453e4452da43f17522b853db4efd9336903faf92 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 7 Sep 2021 16:20:11 +0800 Subject: [PATCH 71/71] update version.py --- tests/pytest/client/version.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py index ca6ce69780..5c79380a00 100644 --- a/tests/pytest/client/version.py +++ b/tests/pytest/client/version.py @@ -28,20 +28,20 @@ class TDTestCase: sql = "select server_version()" ret = tdSql.query(sql) - version = tdSql.getData(0, 0)[0:3] - expectedVersion_dev = "2.0" - expectedVersion_master = "2.1" - if(version == expectedVersion_dev or version == expectedVersion_master): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) + version = floor(float(tdSql.getData(0, 0)[0:3])) + expectedVersion = 2 + + if(version == expectedVersion): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version)) else: - tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master)) + tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion)) sql = "select client_version()" ret = tdSql.query(sql) version = floor(float(tdSql.getData(0, 0)[0:3])) expectedVersion = 2 if(version == expectedVersion): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version)) + tdLog.info("sql:%s, row:%d col:%d data:%d == expect" % (sql, 0, 0, version)) else: tdLog.exit("sql:%s, row:%d col:%d data:%d != expect:%d " % (sql, 0, 0, version, expectedVersion))
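Both version.py updates converge on the same idea: compare only the numeric major version, so the case passes on any 2.x branch instead of hard-coding "2.0" or "2.1". A minimal C sketch of the equivalent check; parsing stops at the first '.', mirroring `floor(float(s[0:3]))` in the Python case, and all names here are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch: extract the leading integer (major version) from an "x.y.z..."
 * version string and compare it with the expected major version. */
static long majorVersion(const char *versionStr) {
  return strtol(versionStr, NULL, 10); /* stops at the first '.' */
}

int main(void) {
  const char *samples[] = {"2.0.20.0", "2.1.7.0", "3.0.0.0"};
  for (int i = 0; i < 3; ++i) {
    long major = majorVersion(samples[i]);
    printf("%-8s -> major %ld, matches expected 2: %s\n",
           samples[i], major, (major == 2) ? "yes" : "no");
  }
  return 0;
}
```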