Merge branch 'develop' of https://github.com/taosdata/TDengine into develop
commit e98061b406
@@ -45,7 +45,9 @@ matrix:
         cd ${TRAVIS_BUILD_DIR}/debug
         make install > /dev/null || travis_terminate $?

+        pip install numpy
         pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
+        pip3 install numpy
         pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

         cd ${TRAVIS_BUILD_DIR}/tests

@@ -164,7 +166,9 @@ matrix:
         cd ${TRAVIS_BUILD_DIR}/debug
         make install > /dev/null || travis_terminate $?

+        pip install numpy
         pip install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python2/
+        pip3 install numpy
         pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/

         cd ${TRAVIS_BUILD_DIR}/tests
@@ -286,7 +286,7 @@ typedef struct STscObj {
   char user[TSDB_USER_LEN];
   char pass[TSDB_KEY_LEN];
   char acctId[TSDB_ACCT_LEN];
-  char db[TSDB_DB_NAME_LEN];
+  char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   char sversion[TSDB_VERSION_LEN];
   char writeAuth : 1;
   char superAuth : 1;
@@ -699,7 +699,7 @@ static int32_t first_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end,
 }

 static int32_t last_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
-  if (pCtx->order == TSDB_ORDER_ASC) {
+  if (pCtx->order != pCtx->param[0].i64Key) {
     return BLK_DATA_NO_NEEDED;
   }


@@ -727,7 +727,7 @@ static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY
 }

 static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
-  if (pCtx->order == TSDB_ORDER_ASC) {
+  if (pCtx->order != pCtx->param[0].i64Key) {
     return BLK_DATA_NO_NEEDED;
   }


@@ -1593,7 +1593,7 @@ static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
   if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
     return;
   }

   if (pCtx->order == TSDB_ORDER_DESC) {
     return;
   }

@@ -1652,7 +1652,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) {
  * least one data in this block that is not null.(TODO opt for this case)
  */
 static void last_function(SQLFunctionCtx *pCtx) {
-  if (pCtx->order == TSDB_ORDER_ASC) {
+  if (pCtx->order != pCtx->param[0].i64Key) {
     return;
   }


@@ -1681,7 +1681,6 @@ static void last_function(SQLFunctionCtx *pCtx) {
 }

 static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) {
-  assert(pCtx->order != TSDB_ORDER_ASC);
   void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
   if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
     return;

@@ -1725,7 +1724,7 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
    * 1. for scan data in asc order, no need to check data
    * 2. for data blocks that are not loaded, no need to check data
    */
-  if (pCtx->order == TSDB_ORDER_ASC) {
+  if (pCtx->order != pCtx->param[0].i64Key) {
     return;
   }


@@ -1763,7 +1762,7 @@ static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) {
    * 1. for scan data in asc order, no need to check data
    * 2. for data blocks that are not loaded, no need to check data
    */
-  if (pCtx->order == TSDB_ORDER_ASC) {
+  if (pCtx->order != pCtx->param[0].i64Key) {
     return;
   }

@@ -97,7 +97,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
     useconds = str2int64(pToken->z);
   } else {
     // strptime("2001-11-12 18:31:01", "%Y-%m-%d %H:%M:%S", &tm);
-    if (taosParseTime(pToken->z, time, pToken->n, timePrec) != TSDB_CODE_SUCCESS) {
+    if (taosParseTime(pToken->z, time, pToken->n, timePrec, tsDaylight) != TSDB_CODE_SUCCESS) {
       return tscInvalidSQLErrMsg(error, "invalid timestamp format", pToken->z);
     }


@@ -138,7 +138,7 @@ static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVa
   STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);

   if (seg != NULL) {
-    if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision) != TSDB_CODE_SUCCESS) {
+    if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
       return invalidSqlErrMsg(pQueryInfo->msg, msg);
     }
   } else {

@@ -1042,7 +1042,7 @@ int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pDB, SSQL

   /* db name is not specified, the tableName dose not include db name */
   if (pDB != NULL) {
-    if (pDB->n >= TSDB_DB_NAME_LEN) {
+    if (pDB->n >= TSDB_ACCT_LEN + TSDB_DB_NAME_LEN) {
       return TSDB_CODE_TSC_INVALID_SQL;
     }


@@ -1452,6 +1452,13 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,

   SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
   tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));

+  // set reverse order scan data blocks for last query
+  if (functionID == TSDB_FUNC_LAST) {
+    pExpr->numOfParams = 1;
+    pExpr->param[0].i64Key = TSDB_ORDER_DESC;
+    pExpr->param[0].nType = TSDB_DATA_TYPE_INT;
+  }
+
   // for all queries, the timestamp column needs to be loaded
   SColumnIndex index = {.tableIndex = pColIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
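
The parser-side hunk above and the aggregate-implementation hunks earlier form one contract: for a LAST query the preferred scan order is recorded in pExpr->param[0] at parse time, and the last_* implementations now compare the actual scan order against that parameter instead of against a hard-coded TSDB_ORDER_ASC. A minimal sketch of that contract, assuming the same field names (the two helpers below are illustrative, not part of the patch):

    // illustrative only: record the order a LAST expression expects to be scanned in
    static void markLastScanOrder(SSqlExpr *pExpr, int64_t order) {
      pExpr->numOfParams = 1;
      pExpr->param[0].i64Key = order;               // TSDB_ORDER_DESC by default,
      pExpr->param[0].nType  = TSDB_DATA_TYPE_INT;  // TSDB_ORDER_ASC when grouping by a normal column
    }

    // illustrative only: an aggregate consumes a block only when the scan order matches
    static bool lastAcceptsThisScan(const SQLFunctionCtx *pCtx) {
      return pCtx->order == pCtx->param[0].i64Key;
    }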
@@ -1724,6 +1731,22 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
       if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
         return TSDB_CODE_TSC_INVALID_SQL;
       }

+      if (optr == TK_LAST) {  // todo refactor
+        SSqlGroupbyExpr* pGroupBy = &pQueryInfo->groupbyExpr;
+        if (pGroupBy->numOfGroupCols > 0) {
+          for(int32_t k = 0; k < pGroupBy->numOfGroupCols; ++k) {
+            SColIndex* pIndex = taosArrayGet(pGroupBy->columnInfo, k);
+            if (!TSDB_COL_IS_TAG(pIndex->flag) && pIndex->colIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { // group by normal columns
+              SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, colIndex + i);
+              pExpr->numOfParams = 1;
+              pExpr->param->i64Key = TSDB_ORDER_ASC;
+
+              break;
+            }
+          }
+        }
+      }
     }
   }


@@ -1990,6 +2013,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken

     if (strncasecmp(pSchema[i].name, pToken->z, pToken->n) == 0) {
       columnIndex = i;
+      break;
     }
   }


@@ -2586,9 +2610,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*

       tscColumnListInsert(pQueryInfo->colList, &index);

-      SColIndex colIndex = {
-          .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId,
-      };
+      SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId };
       taosArrayPush(pGroupExpr->columnInfo, &colIndex);
       pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;


@@ -2886,7 +2908,8 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
   SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, pIndex->columnIndex);

   const char* msg1 = "non binary column not support like operator";
   const char* msg2 = "binary column not support this operator";
+  const char* msg3 = "bool column not support this operator";

   SColumn* pColumn = tscColumnListInsert(pQueryInfo->colList, pIndex);
   SColumnFilterInfo* pColFilter = NULL;

@@ -2920,6 +2943,12 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
     if (pExpr->nSQLOptr == TK_LIKE) {
       return invalidSqlErrMsg(pQueryInfo->msg, msg1);
     }

+    if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
+      if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) {
+        return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+      }
+    }
   }

   pColumn->colIndex = *pIndex;

@@ -3921,7 +3950,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t

     char* seg = strnchr(pRight->val.pz, '-', pRight->val.nLen, false);
     if (seg != NULL) {
-      if (taosParseTime(pRight->val.pz, &val, pRight->val.nLen, TSDB_TIME_PRECISION_MICRO) == TSDB_CODE_SUCCESS) {
+      if (taosParseTime(pRight->val.pz, &val, pRight->val.nLen, TSDB_TIME_PRECISION_MICRO, tsDaylight) == TSDB_CODE_SUCCESS) {
        parsed = true;
      } else {
        return TSDB_CODE_TSC_INVALID_SQL;

@@ -6052,6 +6081,16 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
       }
     }
   }

+  if ((*pExpr)->_node.optr != TSDB_RELATION_EQUAL && (*pExpr)->_node.optr != TSDB_RELATION_NOT_EQUAL) {
+    if (pRight->nodeType == TSQL_NODE_VALUE) {
+      if ( pRight->pVal->nType == TSDB_DATA_TYPE_BOOL
+        || pRight->pVal->nType == TSDB_DATA_TYPE_BINARY
+        || pRight->pVal->nType == TSDB_DATA_TYPE_NCHAR) {
+        return TSDB_CODE_TSC_INVALID_SQL;
+      }
+    }
+  }

   return TSDB_CODE_SUCCESS;
@@ -430,7 +430,7 @@ void tscKillSTableQuery(SSqlObj *pSql) {
   /*
    * 1. if the subqueries are not launched or partially launched, we need to waiting the launched
    * query return to successfully free allocated resources.
-   * 2. if no any subqueries are launched yet, which means the metric query only in parse sql stage,
+   * 2. if no any subqueries are launched yet, which means the super table query only in parse sql stage,
    * set the res.code, and return.
    */
   const int64_t MAX_WAITING_TIME = 10000; // 10 Sec.

@@ -2201,7 +2201,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
    * The cached information is expired, however, we may have lost the ref of original meter. So, clear whole cache
    * instead.
    */
-  tscTrace("%p force release metermeta after drop table:%s", pSql, pTableMetaInfo->name);
+  tscTrace("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
   taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);

   if (pTableMetaInfo->pTableMeta) {
@@ -170,6 +170,7 @@ extern char gitinfo[];
 extern char gitinfoOfInternal[];
 extern char buildinfo[];

+extern int8_t tsDaylight;
 extern char tsTimezone[64];
 extern char tsLocale[64];
 extern char tsCharset[64]; // default encode string

@@ -198,6 +198,7 @@ char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log";
 char tsInternalPass[] = "secretkey";
 int32_t tsMonitorInterval = 30; // seconds

+int8_t tsDaylight = 0;
 char tsTimezone[64] = {0};
 char tsLocale[TSDB_LOCALE_LEN] = {0};
 char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string

@@ -58,6 +58,7 @@ void tsSetTimeZone() {
    * (BST, +0100)
    */
   sprintf(tsTimezone, "(%s, %s%02d00)", tzname[daylight], tz >= 0 ? "+" : "-", abs(tz));
+  tsDaylight = daylight;

   uPrint("timezone format changed to %s", tsTimezone);
 }
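
With tsDaylight captured from the C library's daylight flag in tsSetTimeZone(), the parser paths shown earlier pass it into taosParseTime() so local timestamps can be parsed with daylight-saving awareness. A minimal usage sketch (the literal string and precision below are hypothetical, not taken from the patch):

    int64_t ts = 0;
    const char *buf = "2020-06-01 12:00:00.000";  // hypothetical input
    if (taosParseTime((char *)buf, &ts, (int32_t)strlen(buf),
                      TSDB_TIME_PRECISION_MILLI, tsDaylight) != TSDB_CODE_SUCCESS) {
      // treat the value as an invalid timestamp
    }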
@@ -70,7 +70,8 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo);
 int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp** pRsp, int32_t* contLen);

 /**
- * Decide if more results will be produced or not
+ * Decide if more results will be produced or not, NOTE: this function will increase the ref count of QInfo,
+ * so it can be only called once for each retrieve
  *
  * @param qinfo
  * @return
@@ -250,7 +250,7 @@ typedef struct {

 typedef struct {
   char tableId[TSDB_TABLE_ID_LEN];
-  char db[TSDB_DB_NAME_LEN];
+  char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   int8_t igExists;
   int8_t getMeta;
   int16_t numOfTags;

@@ -268,7 +268,7 @@ typedef struct {

 typedef struct {
   char tableId[TSDB_TABLE_ID_LEN];
-  char db[TSDB_DB_NAME_LEN];
+  char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   int16_t type; /* operation type */
   int16_t numOfCols; /* number of schema */
   int32_t tagValLen;

@@ -670,7 +670,7 @@ typedef struct {
  */
 typedef struct {
   int8_t type;
-  char db[TSDB_DB_NAME_LEN];
+  char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
   uint16_t payloadLen;
   char payload[];
 } SCMShowMsg;
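
A plausible reading of the widened buffers, consistent with the TSDB_ACCT_LEN + TSDB_DB_NAME_LEN check added in setObjFullName(): the db field may now hold a fully qualified name that carries the account id as a prefix. A sketch of that assumption (acctId and dbName are hypothetical locals, and the "<acct>.<db>" layout is an assumption, not stated by the patch):

    char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
    snprintf(db, sizeof(db), "%s.%s", acctId, dbName);  // both parts must fit without truncation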
@@ -78,12 +78,13 @@ void rpcClose(void *);
 void *rpcMallocCont(int contLen);
 void rpcFreeCont(void *pCont);
 void *rpcReallocCont(void *ptr, int contLen);
-void rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
+void *rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
 void rpcSendResponse(const SRpcMsg *pMsg);
 void rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
 int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
 void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pReq, SRpcMsg *pRsp);
 int rpcReportProgress(void *pConn, char *pCont, int contLen);
+void rpcCanelRequest(void *pContext);

 #ifdef __cplusplus
 }
@@ -469,7 +469,6 @@ static int dumpResultToFile(const char* fname, TAOS_RES* result) {
   } while( row != NULL);

   fclose(fp);
-  taos_free_result(result);
   return numOfRows;
 }


@@ -297,7 +297,7 @@ void *deleteTable();

 void *asyncWrite(void *sarg);

-void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);
+int generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary);

 void rand_string(char *str, int size);


@@ -817,7 +817,7 @@ void queryDB(TAOS *taos, char *command) {
     i--;
   }

-  if (i == 0) {
+  if (code != 0) {
     fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
     taos_free_result(pSql);


@@ -846,14 +846,19 @@ void *syncWrite(void *sarg) {
     int k;
     for (k = 0; k < winfo->nrecords_per_request;) {
       int rand_num = rand() % 100;
-      if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate)
-      {
+      int len = -1;
+      if (winfo->data_of_order ==1 && rand_num < winfo->data_of_rate) {
         long d = tmp_time - rand() % 1000000 + rand_num;
-        generateData(data, data_type, ncols_per_record, d, len_of_binary);
-      } else
-      {
-        generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
+        len = generateData(data, data_type, ncols_per_record, d, len_of_binary);
+      } else {
+        len = generateData(data, data_type, ncols_per_record, tmp_time += 1000, len_of_binary);
       }

+      //assert(len + pstr - buffer < BUFFER_SIZE);
+      if (len + pstr - buffer >= BUFFER_SIZE) { // too long
+        break;
+      }
+
       pstr += sprintf(pstr, " %s", data);
       inserted++;
       k++;

@@ -968,7 +973,7 @@ double getCurrentTime() {
   return tv.tv_sec + tv.tv_usec / 1E6;
 }

-void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
+int32_t generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) {
   memset(res, 0, MAX_DATA_SIZE);
   char *pstr = res;
   pstr += sprintf(pstr, "(%" PRId64, timestamp);

@@ -1002,9 +1007,16 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam
       rand_string(s, len_of_binary);
       pstr += sprintf(pstr, ", \"%s\"", s);
     }

+    if (pstr - res > MAX_DATA_SIZE) {
+      perror("column length too long, abort");
+      exit(-1);
+    }
   }

   pstr += sprintf(pstr, ")");

+  return pstr - res;
 }

 static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJK1234567890";
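
The syncWrite() change above relies on generateData() now returning the length of the record it produced. Restated as a minimal standalone sketch (variable names follow the diff; the surrounding buffer management is assumed):

    int len = generateData(data, data_type, ncols_per_record, tmp_time, len_of_binary);
    if (len < 0 || len + (pstr - buffer) >= BUFFER_SIZE) {
      // the next record would overflow the SQL buffer, so stop appending and flush
    } else {
      pstr += sprintf(pstr, " %s", data);
    }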
@@ -84,9 +84,12 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
   mnodeDropAllChildTables(pDb);
   mnodeDropAllSuperTables(pDb);
   mnodeDropAllDbVgroups(pDb);
-  mnodeDropDbFromAcct(pAcct, pDb);
-  mnodeDecAcctRef(pAcct);
+  if (pAcct) {
+    mnodeDropDbFromAcct(pAcct, pDb);
+    mnodeDecAcctRef(pAcct);
+  }

   return TSDB_CODE_SUCCESS;
 }


@@ -157,10 +157,12 @@ static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) {
   if (pDb != NULL) pAcct = mnodeGetAcct(pDb->acct);

   if (pTable->info.type == TSDB_CHILD_TABLE) {
-    grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
-    if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
-    mnodeRemoveTableFromStable(pTable->superTable, pTable);
-    mnodeDecTableRef(pTable->superTable);
+    if (pTable->superTable) {
+      grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
+      if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
+      mnodeRemoveTableFromStable(pTable->superTable, pTable);
+      mnodeDecTableRef(pTable->superTable);
+    }
   } else {
     grantRestore(TSDB_GRANT_TIMESERIES, pTable->numOfColumns - 1);
     if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
@@ -354,7 +354,7 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
                                              int16_t bytes) {
   SQuery *pQuery = pRuntimeEnv->pQuery;

-  int32_t *p1 = (int32_t *)taosHashGet(pWindowResInfo->hashList, pData, bytes);
+  int32_t *p1 = (int32_t *) taosHashGet(pWindowResInfo->hashList, pData, bytes);
   if (p1 != NULL) {
     pWindowResInfo->curIndex = *p1;
   } else { // more than the capacity, reallocate the resources

@@ -919,12 +919,25 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat

   SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf;

+  int64_t v = -1;
+  // not assign result buffer yet, add new result buffer
+  switch(type) {
+    case TSDB_DATA_TYPE_BOOL:
+    case TSDB_DATA_TYPE_TINYINT: v = GET_INT8_VAL(pData); break;
+    case TSDB_DATA_TYPE_SMALLINT: v = GET_INT16_VAL(pData); break;
+    case TSDB_DATA_TYPE_INT: v = GET_INT32_VAL(pData); break;
+    case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break;
+  }
+
+  // assert(pRuntimeEnv->windowResInfo.hashList->size <= 2);
   SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pData, bytes);
   if (pWindowRes == NULL) {
     return -1;
   }

-  // not assign result buffer yet, add new result buffer
+  pWindowRes->window.skey = v;
+  pWindowRes->window.ekey = v;
+
   if (pWindowRes->pos.pageId == -1) {
     int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, GROUPRESULTID, pRuntimeEnv->numOfRowsPerPage);
     if (ret != 0) {

@@ -1022,12 +1035,16 @@ static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
     return false;
   }

-  if (functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST) {
-    return !QUERY_IS_ASC_QUERY(pQuery);
-  } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) {
+  if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) {
     return QUERY_IS_ASC_QUERY(pQuery);
   }

+  // todo add comments
+  if ((functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST)) {
+    return pCtx->param[0].i64Key == pQuery->order.order;
+//    return !QUERY_IS_ASC_QUERY(pQuery);
+  }
+
   // in the supplementary scan, only the following functions need to be executed
   if (IS_REVERSE_SCAN(pRuntimeEnv)) {
     return false;

@@ -1079,7 +1096,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS

   int32_t j = 0;
   int32_t offset = -1;

   for (j = 0; j < pDataBlockInfo->rows; ++j) {
     offset = GET_COL_DATA_POS(pQuery, j, step);


@@ -1479,19 +1496,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {

 static bool isQueryKilled(SQInfo *pQInfo) {
   return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
-#if 0
-  /*
-   * check if the queried meter is going to be deleted.
-   * if it will be deleted soon, stop current query ASAP.
-   */
-  SMeterObj *pMeterObj = pQInfo->pObj;
-  if (vnodeIsMeterState(pMeterObj, TSDB_METER_STATE_DROPPING)) {
-    pQInfo->killed = 1;
-    return true;
-  }
-
-  return (pQInfo->killed == 1);
-#endif
 }

 static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; }

@@ -1574,10 +1578,14 @@ static bool needReverseScan(SQuery *pQuery) {
       continue;
     }

-    if (((functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) && QUERY_IS_ASC_QUERY(pQuery)) ||
-        ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery))) {
+    if ((functionId == TSDB_FUNC_FIRST || functionId == TSDB_FUNC_FIRST_DST) && !QUERY_IS_ASC_QUERY(pQuery)) {
       return true;
     }

+    if (functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_LAST_DST) {
+      int32_t order = pQuery->pSelectExpr[i].base.arg->argValue.i64;
+      return order != pQuery->order.order;
+    }
   }

   return false;
@@ -2030,6 +2038,34 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) {
   return midPos;
 }

+static void ensureOutputBufferSimple(SQueryRuntimeEnv* pRuntimeEnv, int32_t capacity) {
+  SQuery* pQuery = pRuntimeEnv->pQuery;
+
+  if (capacity < pQuery->rec.capacity) {
+    return;
+  }
+
+  for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+    int32_t bytes = pQuery->pSelectExpr[i].bytes;
+    assert(bytes > 0 && capacity > 0);
+
+    char *tmp = realloc(pQuery->sdata[i], bytes * capacity + sizeof(tFilePage));
+    if (tmp == NULL) {  // todo handle the oom
+      assert(0);
+    } else {
+      pQuery->sdata[i] = (tFilePage *)tmp;
+    }
+
+    // set the pCtx output buffer position
+    pRuntimeEnv->pCtx[i].aOutputBuf = pQuery->sdata[i]->data;
+  }
+
+  qTrace("QInfo:%p realloc output buffer to inc output buffer from: %d rows to:%d rows", GET_QINFO_ADDR(pRuntimeEnv),
+         pQuery->rec.capacity, capacity);
+
+  pQuery->rec.capacity = capacity;
+}
+
 static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
   // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
   SQuery* pQuery = pRuntimeEnv->pQuery;

@@ -2916,8 +2952,7 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) {
         pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
       }
     }
-

     updateNumOfResult(pRuntimeEnv, pQuery->rec.rows);
   }
 }

@@ -3054,7 +3089,7 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus
   pQuery->window = pTableQueryInfo->win;
 }

-void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
+void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {
   SQInfo *pQInfo = (SQInfo *) GET_QINFO_ADDR(pRuntimeEnv);
   SQuery *pQuery = pRuntimeEnv->pQuery;
   STableQueryInfo *pTableQueryInfo = pQuery->current;
@@ -3496,18 +3531,32 @@ void copyFromWindowResToSData(SQInfo *pQInfo, SWindowResult *result) {
   assert(pQuery->rec.rows <= pQuery->rec.capacity);
 }

-static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
+static UNUSED_FUNC void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
   SQuery *pQuery = pRuntimeEnv->pQuery;

   // update the number of result for each, only update the number of rows for the corresponding window result.
   if (pQuery->intervalTime == 0) {
-    int32_t g = pTableQueryInfo->groupIndex;
-    assert(pRuntimeEnv->windowResInfo.size > 0);

-    SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
-    if (pWindowRes->numOfRows == 0) {
-      pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
+    for (int32_t i = 0; i < pRuntimeEnv->windowResInfo.size; ++i) {
+      SWindowResult *pResult = &pRuntimeEnv->windowResInfo.pResult[i];
+
+      for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
+        int32_t functionId = pRuntimeEnv->pCtx[j].functionId;
+        if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
+          continue;
+        }
+
+        pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+      }
     }
+
+//    int32_t g = pTableQueryInfo->groupIndex;
+//    assert(pRuntimeEnv->windowResInfo.size > 0);
+//
+//    SWindowResult *pWindowRes = doSetTimeWindowFromKey(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (char *)&g, sizeof(g));
+//    if (pWindowRes->numOfRows == 0) {
+//      pWindowRes->numOfRows = getNumOfResult(pRuntimeEnv);
+//    }
   }
 }


@@ -3519,7 +3568,7 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *
   SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
   pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;

-  if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL) {
+  if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
     rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
   } else {
     blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);

@@ -4081,21 +4130,22 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
     SDataStatis *pStatis = NULL;
     SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);

-    if (!isIntervalQuery(pQuery)) {
-      int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
-      setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIndex, blockInfo.window.ekey + step);
-    } else { // interval query
-      TSKEY nextKey = blockInfo.window.skey;
-      setIntervalQueryRange(pQInfo, nextKey);
-      /*int32_t ret = */setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
+    if (!isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
+      if (!isIntervalQuery(pQuery)) {
+        int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
+        setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIndex, blockInfo.window.ekey + step);
+      } else { // interval query
+        TSKEY nextKey = blockInfo.window.skey;
+        setIntervalQueryRange(pQInfo, nextKey);
+        /*int32_t ret = */setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
+      }
     }

     summary->totalRows += blockInfo.rows;
     stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);

     qTrace("QInfo:%p check data block, uid:%"PRId64", tid:%d, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%" PRId64,
-           GET_QINFO_ADDR(pRuntimeEnv), blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey,
-           blockInfo.rows, pQuery->current->lastKey);
+           pQInfo, blockInfo.uid, blockInfo.tid, blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey);
   }

   int64_t et = taosGetTimestampMs();
@@ -4220,7 +4270,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {

       // here we simply set the first table as current table
       pQuery->current = ((SGroupItem*) taosArrayGet(group, 0))->info;
-      scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+      scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);

       int64_t numOfRes = getNumOfResult(pRuntimeEnv);
       if (numOfRes > 0) {

@@ -4233,10 +4283,84 @@ static void sequentialTableProcess(SQInfo *pQInfo) {

       // enable execution for next table, when handling the projection query
       enableExecutionForNextTable(pRuntimeEnv);

+      if (pQuery->rec.rows >= pQuery->rec.capacity) {
+        setQueryStatus(pQuery, QUERY_RESBUF_FULL);
+        break;
+      }
+    }
+  } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // group-by on normal columns query
+    while (pQInfo->groupIndex < numOfGroups) {
+      SArray* group = taosArrayGetP(pQInfo->groupInfo.pGroupList, pQInfo->groupIndex);
+
+      qTrace("QInfo:%p group by normal columns group:%d, total group:%d", pQInfo, pQInfo->groupIndex, numOfGroups);
+
+      STsdbQueryCond cond = {
+          .twindow = pQuery->window,
+          .colList = pQuery->colList,
+          .order = pQuery->order.order,
+          .numOfCols = pQuery->numOfCols,
+      };
+
+      SArray *g1 = taosArrayInit(1, POINTER_BYTES);
+      SArray *tx = taosArrayClone(group);
+      taosArrayPush(g1, &tx);
+
+      STableGroupInfo gp = {.numOfTables = taosArrayGetSize(tx), .pGroupList = g1};
+
+      // include only current table
+      if (pRuntimeEnv->pQueryHandle != NULL) {
+        tsdbCleanupQueryHandle(pRuntimeEnv->pQueryHandle);
+        pRuntimeEnv->pQueryHandle = NULL;
+      }
+
+      pRuntimeEnv->pQueryHandle = tsdbQueryTables(pQInfo->tsdb, &cond, &gp, pQInfo);
+
+      SArray* s = tsdbGetQueriedTableIdList(pRuntimeEnv->pQueryHandle);
+      assert(taosArrayGetSize(s) >= 1);
+
+      setTagVal(pRuntimeEnv, (STableId*) taosArrayGet(s, 0), pQInfo->tsdb);
+
+      // here we simply set the first table as current table
+      scanMultiTableDataBlocks(pQInfo);
+      pQInfo->groupIndex += 1;
+
+      SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
+
+      // no results generated for current group, continue to try the next group
+      if (pWindowResInfo->size <= 0) {
+        continue;
+      }
+
+      for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
+        SWindowStatus *pStatus = &pWindowResInfo->pResult[i].status;
+        pStatus->closed = true; // enable return all results for group by normal columns
+
+        SWindowResult *pResult = &pWindowResInfo->pResult[i];
+        for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
+          pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
+        }
+      }
+
+      qTrace("QInfo:%p generated groupby columns results %d rows for group %d completed", pQInfo, pWindowResInfo->size,
+             pQInfo->groupIndex);
+      int32_t currentGroupIndex = pQInfo->groupIndex;
+
+      pQuery->rec.rows = 0;
+      pQInfo->groupIndex = 0;
+
+      ensureOutputBufferSimple(pRuntimeEnv, pWindowResInfo->size);
+      copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult);
+
+      pQInfo->groupIndex = currentGroupIndex; //restore the group index
+      assert(pQuery->rec.rows == pWindowResInfo->size);
+
+      clearClosedTimeWindow(pRuntimeEnv);
+      break;
     }
   } else {
     /*
-     * 1. super table projection query, 2. group-by on normal columns query, 3. ts-comp query
+     * 1. super table projection query, 2. ts-comp query
      * if the subgroup index is larger than 0, results generated by group by tbname,k is existed.
      * we need to return it to client in the first place.
      */

@@ -4283,7 +4407,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
         }
       }

-      scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+      scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
       skipResults(pRuntimeEnv);

       // the limitation of output result is reached, set the query completed

@@ -4349,25 +4473,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
     pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur;
   }

-  // todo refactor
-  if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
-    SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;
-
-    for (int32_t i = 0; i < pWindowResInfo->size; ++i) {
-      SWindowStatus *pStatus = &pWindowResInfo->pResult[i].status;
-      pStatus->closed = true; // enable return all results for group by normal columns
-
-      SWindowResult *pResult = &pWindowResInfo->pResult[i];
-      for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
-        pResult->numOfRows = MAX(pResult->numOfRows, pResult->resultInfo[j].numOfRes);
-      }
-    }
-
-    pQInfo->groupIndex = 0;
-    pQuery->rec.rows = 0;
-    copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult);
-  }
-
   qTrace(
       "QInfo %p numOfTables:%d, index:%d, numOfGroups:%d, %d points returned, total:%"PRId64", offset:%" PRId64,
       pQInfo, pQInfo->groupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,

@@ -4449,7 +4554,6 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
    */
   if (isIntervalQuery(pQuery)) {
     copyResToQueryResultBuf(pQInfo, pQuery);
-
 #ifdef _DEBUG_VIEW
     displayInterResult(pQuery->sdata, pRuntimeEnv, pQuery->sdata[0]->num);
 #endif

@@ -4527,7 +4631,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)

   pQuery->current = pTableInfo;  // set current query table info

-  scanAllDataBlocks(pRuntimeEnv, pTableInfo->lastKey);
+  scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey);
   finalizeQueryResult(pRuntimeEnv);

   if (isQueryKilled(pQInfo)) {

@@ -4560,7 +4664,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
   }

   while (1) {
-    scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
+    scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
     finalizeQueryResult(pRuntimeEnv);

     if (isQueryKilled(pQInfo)) {

@@ -4607,7 +4711,7 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start)
   SQuery *pQuery = pRuntimeEnv->pQuery;

   while (1) {
-    scanAllDataBlocks(pRuntimeEnv, start);
+    scanOneTableDataBlocks(pRuntimeEnv, start);

     if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
       return;
@@ -113,7 +113,9 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
   for (int32_t i = 0; i < num; ++i) {
     SWindowResult *pResult = &pWindowResInfo->pResult[i];
     if (pResult->status.closed) { // remove the window slot from hash table
-      taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE);
+      taosHashRemove(pWindowResInfo->hashList, (const char *)&pResult->window.skey, pWindowResInfo->type);
+      printf("remove ============>%ld, remain size:%ld\n", pResult->window.skey, pWindowResInfo->hashList->size);
+
     } else {
       break;
     }

@@ -133,14 +135,16 @@ void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) {
   }

   pWindowResInfo->size = remain;
+  printf("---------------size:%ld\n", taosHashGetSize(pWindowResInfo->hashList));
   for (int32_t k = 0; k < pWindowResInfo->size; ++k) {
     SWindowResult *pResult = &pWindowResInfo->pResult[k];
-    int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey, TSDB_KEYSIZE);
+    int32_t *p = (int32_t *)taosHashGet(pWindowResInfo->hashList, (const char *)&pResult->window.skey,
+                                        tDataTypeDesc[pWindowResInfo->type].nSize);

     int32_t v = (*p - num);
     assert(v >= 0 && v <= pWindowResInfo->size);
-    taosHashPut(pWindowResInfo->hashList, (char *)&pResult->window.skey, TSDB_KEYSIZE, (char *)&v, sizeof(int32_t));
+    taosHashPut(pWindowResInfo->hashList, (char *)&pResult->window.skey, tDataTypeDesc[pWindowResInfo->type].nSize,
+                (char *)&v, sizeof(int32_t));
   }

   pWindowResInfo->curIndex = -1;
@@ -73,6 +73,7 @@ typedef struct {
   SRpcInfo *pRpc;      // associated SRpcInfo
   SRpcIpSet ipSet;     // ip list provided by app
   void *ahandle;       // handle provided by app
+  struct SRpcConn *pConn;  // pConn allocated
   char msgType;        // message type
   uint8_t *pCont;      // content provided by app
   int32_t contLen;     // content length

@@ -339,7 +340,7 @@ void *rpcReallocCont(void *ptr, int contLen) {
   return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
 }

-void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
+void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
   SRpcInfo *pRpc = (SRpcInfo *)shandle;
   SRpcReqContext *pContext;


@@ -367,7 +368,7 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg)

   rpcSendReqToServer(pRpc, pContext);

-  return;
+  return pContext;
 }

 void rpcSendResponse(const SRpcMsg *pRsp) {

@@ -501,6 +502,19 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) {
   return -1;
 }

+/* todo: cancel process may have race condition, pContext may have been released
+   just before app calls the rpcCancelRequest */
+void rpcCancelRequest(void *handle) {
+  SRpcReqContext *pContext = handle;
+
+  if (pContext->pConn) {
+    tTrace("%s, app trys to cancel request", pContext->pConn->info);
+    rpcCloseConn(pContext->pConn);
+    pContext->pConn = NULL;
+    rpcFreeCont(pContext->pCont);
+  }
+}
+
 static void rpcFreeMsg(void *msg) {
   if ( msg ) {
     char *temp = (char *)msg - sizeof(SRpcReqContext);

@@ -874,6 +888,7 @@ static void rpcReportBrokenLinkToServer(SRpcConn *pConn) {
   SRpcMsg rpcMsg;
   rpcMsg.pCont = pConn->pReqMsg;     // pReqMsg is re-used to store the APP context from server
   rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length
+  rpcMsg.ahandle = pConn->ahandle;
   rpcMsg.handle = pConn;
   rpcMsg.msgType = pConn->inType;
   rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;

@@ -942,6 +957,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
 static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
   SRpcInfo *pRpc = pContext->pRpc;

+  pContext->pConn = NULL;
   if (pContext->pRsp) {
     // for synchronous API
     memcpy(pContext->pSet, &pContext->ipSet, sizeof(SRpcIpSet));

@@ -1110,6 +1126,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
     return;
   }

+  pContext->pConn = pConn;
   pConn->ahandle = pContext->ahandle;
   rpcLockConn(pConn);

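
rpcSendRequest() now returns the request context, and rpcCancelRequest() (declared as rpcCanelRequest in the header hunk earlier) lets the application drop an in-flight request by closing its connection and freeing the content. A minimal usage sketch, assuming an already-initialised client handle, ip set and message (pRpcClient, ipSet and reqMsg are hypothetical names):

    void *reqHandle = rpcSendRequest(pRpcClient, &ipSet, &reqMsg);
    /* ... later, if the caller no longer wants the response ... */
    rpcCancelRequest(reqHandle);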
@@ -56,7 +56,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {

 int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);

-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec);
+int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
 void deltaToUtcInitOnce();

 #ifdef __cplusplus
@ -24,7 +24,6 @@
|
||||||
#include "taosdef.h"
|
#include "taosdef.h"
|
||||||
#include "ttime.h"
|
#include "ttime.h"
|
||||||
#include "tutil.h"
|
#include "tutil.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* mktime64 - Converts date to seconds.
|
* mktime64 - Converts date to seconds.
|
||||||
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
|
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
|
||||||
|
@@ -119,15 +118,21 @@ static int month[12] = {
 static int64_t parseFraction(char* str, char** end, int32_t timePrec);
 static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec);
 static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec);
+static int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec);

+static int32_t (*parseLocaltimeFp[]) (char* timestr, int64_t* time, int32_t timePrec) = {
+  parseLocaltime,
+  parseLocaltimeWithDst
+};
+
 int32_t taosGetTimestampSec() { return (int32_t)time(NULL); }

-int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec) {
+int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight) {
   /* parse datatime string in with tz */
   if (strnchr(timestr, 'T', len, false) != NULL) {
     return parseTimeWithTz(timestr, time, timePrec);
   } else {
-    return parseLocaltime(timestr, time, timePrec);
+    return (*parseLocaltimeFp[daylight])(timestr, time, timePrec);
   }
 }

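The hunk above threads the new daylight flag through taosParseTime() by indexing a small function-pointer table: slot 0 keeps the original fixed-offset parser, slot 1 selects the new DST-aware one. A minimal, self-contained sketch of that dispatch pattern (the function names, sample string, and printed output here are illustrative assumptions, not TDengine code):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for parseLocaltime / parseLocaltimeWithDst. */
    static int32_t parse_fixed(const char *str, int64_t *ts)    { (void)str; *ts = 0; return 0; }
    static int32_t parse_with_dst(const char *str, int64_t *ts) { (void)str; *ts = 0; return 0; }

    /* Table indexed by the daylight flag, mirroring parseLocaltimeFp[]. */
    static int32_t (*parse_fp[])(const char *, int64_t *) = { parse_fixed, parse_with_dst };

    int main(void) {
      int64_t ts = 0;
      int8_t  daylight = 1;  /* 0 or 1; in TDengine this would come from configuration */
      (*parse_fp[daylight])("2020-01-01 00:00:00", &ts);
      printf("dispatched to the %s parser\n", daylight ? "DST-aware" : "fixed-offset");
      return 0;
    }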
@@ -304,9 +309,6 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
     return -1;
   }

-  /* mktime will be affected by TZ, set by using taos_options */
-  //int64_t seconds = mktime(&tm);
-  //int64_t seconds = (int64_t)user_mktime(&tm);
   int64_t seconds = user_mktime64(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);

   int64_t fraction = 0;
@@ -324,6 +326,32 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
   return 0;
 }

+int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
+  *time = 0;
+  struct tm tm = {0};
+  tm.tm_isdst = -1;
+
+  char* str = strptime(timestr, "%Y-%m-%d %H:%M:%S", &tm);
+  if (str == NULL) {
+    return -1;
+  }
+
+  /* mktime will be affected by TZ, set by using taos_options */
+  int64_t seconds = mktime(&tm);
+
+  int64_t fraction = 0;
+
+  if (*str == '.') {
+    /* parse the second fraction part */
+    if ((fraction = parseFraction(str + 1, &str, timePrec)) < 0) {
+      return -1;
+    }
+  }
+
+  int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
+  *time = factor * seconds + fraction;
+  return 0;
+}
 static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
   *result = val;

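The body of parseLocaltimeWithDst() above differs from parseLocaltime() in one essential point: it leaves tm_isdst at -1 so that mktime() consults the active time-zone rules, including daylight saving time, instead of converting through the user_mktime64() arithmetic, which applies a single fixed offset. A small sketch of why that matters (the sample date is an illustrative assumption, and tm_isdst = 0 merely stands in for the fixed-offset path; this is not TDengine code):

    #define _XOPEN_SOURCE 700   /* for strptime() on glibc */
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void) {
      struct tm base;
      memset(&base, 0, sizeof(base));

      /* A wall-clock time that falls inside DST in many time zones. */
      if (strptime("2019-07-01 12:00:00", "%Y-%m-%d %H:%M:%S", &base) == NULL) {
        return 1;
      }

      struct tm with_dst = base, fixed = base;
      with_dst.tm_isdst = -1;  /* let mktime() decide, as parseLocaltimeWithDst does */
      fixed.tm_isdst    = 0;   /* force standard time: a fixed-offset interpretation */

      time_t t_dst   = mktime(&with_dst);
      time_t t_fixed = mktime(&fixed);

      /* In a zone that observes DST the two epochs differ by one hour. */
      printf("DST-aware: %lld  fixed: %lld  delta: %lld s\n",
             (long long)t_dst, (long long)t_fixed, (long long)(t_fixed - t_dst));
      return 0;
    }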
@@ -321,6 +321,22 @@ void vnodeRelease(void *pVnodeRaw) {
     return;
   }

+  if (pVnode->tsdb)
+    tsdbCloseRepo(pVnode->tsdb, 1);
+  pVnode->tsdb = NULL;
+
+  if (pVnode->wal)
+    walClose(pVnode->wal);
+  pVnode->wal = NULL;
+
+  if (pVnode->wqueue)
+    dnodeFreeVnodeWqueue(pVnode->wqueue);
+  pVnode->wqueue = NULL;
+
+  if (pVnode->rqueue)
+    dnodeFreeVnodeRqueue(pVnode->rqueue);
+  pVnode->rqueue = NULL;
+
   tfree(pVnode->rootDir);

   if (pVnode->status == TAOS_VN_STATUS_DELETING) {
@@ -411,33 +427,21 @@ void vnodeBuildStatusMsg(void *param) {
 }

 static void vnodeCleanUp(SVnodeObj *pVnode) {
+  // remove from hash, so new messages wont be consumed
   taosHashRemove(tsDnodeVnodesHash, (const char *)&pVnode->vgId, sizeof(int32_t));

+  // stop replication module
   if (pVnode->sync) {
     syncStop(pVnode->sync);
     pVnode->sync = NULL;
   }

-  if (pVnode->tsdb)
-    tsdbCloseRepo(pVnode->tsdb, 1);
-  pVnode->tsdb = NULL;
-
-  if (pVnode->wal)
-    walClose(pVnode->wal);
-  pVnode->wal = NULL;
-
+  // stop continuous query
   if (pVnode->cq)
     cqClose(pVnode->cq);
   pVnode->cq = NULL;

-  if (pVnode->wqueue)
-    dnodeFreeVnodeWqueue(pVnode->wqueue);
-  pVnode->wqueue = NULL;
-
-  if (pVnode->rqueue)
-    dnodeFreeVnodeRqueue(pVnode->rqueue);
-  pVnode->rqueue = NULL;
-
+  // release local resources only after cutting off outside connections
   vnodeRelease(pVnode);
 }

@@ -61,7 +61,7 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {

 // notify connection(handle) that current qhandle is created, if current connection from
 // client is broken, the query needs to be killed immediately.
-static void vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
+static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
   SRetrieveTableMsg* killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
   killQueryMsg->qhandle = htobe64((uint64_t) qhandle);
   killQueryMsg->free = htons(1);
@@ -69,7 +69,7 @@ static void vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId)
   killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));

   vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle);
-  rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
+  return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
 }

 static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
@@ -106,7 +106,17 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
     pRet->len = sizeof(SQueryTableRsp);
     pRet->rsp = pRsp;

-    vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId);
+    // current connect is broken
+    if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) {
+      vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
+      pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+
+      //NOTE: there two refcount, needs to kill twice, todo refactor
+      qKillQuery(pQInfo);
+      qKillQuery(pQInfo);
+
+      return pRsp->code;
+    }

     vTrace("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
   } else {
@@ -11,6 +11,8 @@
 4. pip install ../src/connector/python/linux/python2 ; pip3 install
    ../src/connector/python/linux/python3

+5. pip install numpy; pip3 install numpy
+
 > Note: Both Python2 and Python3 are currently supported by the Python test
 > framework. Since Python2 is no longer officially supported by Python Software
 > Foundation since January 1, 2020, it is recommended that subsequent test case
@@ -137,6 +137,7 @@ python3 ./test.py -f query/queryError.py
 python3 ./test.py -f query/filterAllIntTypes.py
 python3 ./test.py -f query/filterFloatAndDouble.py
 python3 ./test.py -f query/filterOtherTypes.py
+python3 ./test.py -f query/querySort.py

 #stream
 python3 ./test.py -f stream/stream1.py
@@ -0,0 +1,80 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        print("======= step 1: create table and insert data =========")
+        tdLog.debug(
+            ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
+                tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+                tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+        tdSql.execute(
+            ''' create table st(ts timestamp, tbcol1 tinyint, tbcol2 smallint, tbcol3 int, tbcol4 bigint, tbcol5 float, tbcol6 double,
+                tbcol7 bool, tbcol8 nchar(20), tbcol9 binary(20)) tags(tagcol1 tinyint, tagcol2 smallint, tagcol3 int, tagcol4 bigint, tagcol5 float,
+                tagcol6 double, tagcol7 bool, tagcol8 nchar(20), tagcol9 binary(20))''')
+
+        for i in range(self.rowNum):
+            tdSql.execute("create table st%d using st tags(%d, %d, %d, %d, %f, %f, %d, 'tag%d', '标签%d')" % (i + 1, i + 1, i + 1, i + 1, i + 1, 1.1 * (i + 1),
+                          1.23 * (i + 1), (i + 1) % 2, i + 1, i + 1))
+            for j in range(self.rowNum):
+                tdSql.execute("insert into st%d values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')" % (i + 1, self.ts + 10 * (i + 1) + j + 1,
+                              j + 1, j + 1, j + 1, j + 1, 1.1 * (j + 1), 1.23 * (j + 1), (j + 1) % 2, j + 1, j + 1))
+
+        print("======= step 2: verify order for each column =========")
+        # sort for timestamp in asc order
+        tdSql.query("select * from st order by ts asc")
+        tdSql.checkColumnSorted(0, "asc")
+
+        # sort for timestamp in desc order
+        tdSql.query("select * from st order by ts desc")
+        tdSql.checkColumnSorted(0, "desc")
+
+        for i in range(1, 10):
+            tdSql.error("select * from st order by tbcol%d" % i)
+            tdSql.error("select * from st order by tbcol%d asc" % i)
+            tdSql.error("select * from st order by tbcol%d desc" % i)
+
+            tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d" % (i, i))
+            tdSql.checkColumnSorted(1, "")
+
+            tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d asc" % (i, i))
+            tdSql.checkColumnSorted(1, "asc")
+
+            tdSql.query("select avg(tbcol1) from st group by tagcol%d order by tagcol%d desc" % (i, i))
+            tdSql.checkColumnSorted(1, "desc")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
@@ -80,8 +80,8 @@ python3 ./test.py -f import_merge/importCacheFileTO.py
 python3 ./test.py -f import_merge/importCacheFileTPO.py
 python3 ./test.py -f import_merge/importCacheFileT.py
 python3 ./test.py -f import_merge/importDataH2.py
-# python3 ./test.py -f import_merge/importDataHO2.py
-# python3 ./test.py -f import_merge/importDataHO.py
+python3 ./test.py -f import_merge/importDataHO2.py
+python3 ./test.py -f import_merge/importDataHO.py
 python3 ./test.py -f import_merge/importDataHPO.py
 python3 ./test.py -f import_merge/importDataLastHO.py
 python3 ./test.py -f import_merge/importDataLastHPO.py
@@ -92,7 +92,7 @@ python3 ./test.py -f import_merge/importDataLastTO.py
 python3 ./test.py -f import_merge/importDataLastTPO.py
 python3 ./test.py -f import_merge/importDataLastT.py
 python3 ./test.py -f import_merge/importDataS.py
-# python3 ./test.py -f import_merge/importDataSub.py
+python3 ./test.py -f import_merge/importDataSub.py
 python3 ./test.py -f import_merge/importDataTO.py
 python3 ./test.py -f import_merge/importDataTPO.py
 python3 ./test.py -f import_merge/importDataT.py
@@ -133,6 +133,7 @@ python3 ./test.py -f user/pass_len.py
 python3 ./test.py -f query/filter.py
 python3 ./test.py -f query/filterAllIntTypes.py
 python3 ./test.py -f query/filterFloatAndDouble.py
+python3 ./test.py -f query/querySort.py


 #stream
@@ -17,6 +17,7 @@ import time
 import datetime
 import inspect
 from util.log import *
+import numpy as np


 class TDSql:
@@ -196,5 +197,39 @@ class TDSql:
         tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
                    (self.sql, self.affectedRows, expectAffectedRows))

+    def checkColumnSorted(self, col, order):
+        frame = inspect.stack()[1]
+        callerModule = inspect.getmodule(frame[0])
+        callerFilename = callerModule.__file__
+
+        if col < 0:
+            tdLog.exit(
+                "%s failed: sql:%s, col:%d is smaller than zero" %
+                (callerFilename, self.sql, col))
+        if col > self.queryCols:
+            tdLog.exit(
+                "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
+                (callerFilename, self.sql, col, self.queryCols))
+
+        matrix = np.array(self.queryResult)
+        list = matrix[:, 0]
+
+        if order == "" or order.upper() == "ASC":
+            if all(sorted(list) == list):
+                tdLog.info("sql:%s, column :%d is sorted in ascending order as expected" %
+                           (self.sql, col))
+            else:
+                tdLog.exit("%s failed: sql:%s, col:%d is not sorted in ascending order" %
+                           (callerFilename, self.sql, col))
+        elif order.upper() == "DESC":
+            if all(sorted(list, reverse=True) == list):
+                tdLog.info("sql:%s, column :%d is sorted in descending order as expected" %
+                           (self.sql, col))
+            else:
+                tdLog.exit("%s failed: sql:%s, col:%d is not sorted in descending order" %
+                           (callerFilename, self.sql, col))
+        else:
+            tdLog.exit("%s failed: sql:%s, the order provided for col:%d is not correct" %
+                       (callerFilename, self.sql, col))
+
 tdSql = TDSql()
@@ -355,7 +355,7 @@ if $data00 != 0 then
   return -1
 endi

-if $data01 != 800 then
+if $data11 != 800 then
   return -1
 endi

@@ -25,7 +25,7 @@ step1:
 sql create database $db cache 16
 print ====== create tables
 sql use $db
-
+sql reset query cache
 $i = 0
 $ts = $ts0
 $tb = $tbPrefix . $i
@@ -352,7 +352,7 @@ if $data07 != nchar0 then
   return -1
 endi

-sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 > 0 and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
+sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 != 0 and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
 if $rows != 1 then
   return -1
 endi
@@ -198,15 +198,15 @@ if $rows != 100 then
   return -1
 endi

-sql select * from $mt where tgcol > '0'
-#print rows = $rows
-if $rows != 100 then
-  return -1
-endi
-#print $data03
-if $data03 != 1 then
-  return -1
-endi
+#sql select * from $mt where tgcol > '0'
+##print rows = $rows
+#if $rows != 100 then
+#  return -1
+#endi
+##print $data03
+#if $data03 != 1 then
+#  return -1
+#endi

 # cumulative query with nchar tag filtering
 sql select count(tbcol1), avg(tbcol1), sum(tbcol1), min(tbcol1), max(tbcol1), first(tbcol1), last(tbcol1) from $mt where tgcol = '1'
@@ -40,42 +40,42 @@
 #run general/parser/nchar.sim
 #sleep 2000
 ##run general/parser/null_char.sim
-#sleep 2000
-#run general/parser/single_row_in_tb.sim
-#sleep 2000
-#run general/parser/select_from_cache_disk.sim
-#sleep 2000
-#run general/parser/selectResNum.sim
-#sleep 2000
-#run general/parser/mixed_blocks.sim
-#sleep 2000
-#run general/parser/limit1.sim
-#sleep 2000
-#run general/parser/limit.sim
-#sleep 2000
-#run general/parser/limit1_tblocks100.sim
-#sleep 2000
-#run general/parser/select_across_vnodes.sim
-#sleep 2000
-#run general/parser/slimit1.sim
-#sleep 2000
-#run general/parser/tbnameIn.sim
-#sleep 2000
+sleep 2000
+run general/parser/single_row_in_tb.sim
+sleep 2000
+run general/parser/select_from_cache_disk.sim
+sleep 2000
+run general/parser/selectResNum.sim
+sleep 2000
+run general/parser/mixed_blocks.sim
+sleep 2000
+run general/parser/limit1.sim
+sleep 2000
+run general/parser/limit.sim
+sleep 2000
+run general/parser/limit1_tblocks100.sim
+sleep 2000
+run general/parser/select_across_vnodes.sim
+sleep 2000
+run general/parser/slimit1.sim
+sleep 2000
+run general/parser/tbnameIn.sim
+sleep 2000
 run general/parser/projection_limit_offset.sim
 sleep 2000
 run general/parser/limit2.sim
 sleep 2000
-#run general/parser/fill.sim
-#sleep 2000
-#run general/parser/fill_stb.sim
-#sleep 2000
-#run general/parser/where.sim
-#sleep 2000
-#run general/parser/slimit.sim
-#sleep 2000
-#run general/parser/select_with_tags.sim
-#sleep 2000
-#run general/parser/interp.sim
+run general/parser/fill.sim
+sleep 2000
+run general/parser/fill_stb.sim
+sleep 2000
+run general/parser/where.sim
+sleep 2000
+run general/parser/slimit.sim
+sleep 2000
+run general/parser/select_with_tags.sim
+sleep 2000
+run general/parser/interp.sim
 sleep 2000
 run general/parser/tags_dynamically_specifiy.sim
 sleep 2000
@@ -159,6 +159,21 @@ cd ../../../debug; make
 ./test.sh -f general/stable/values.sim
 ./test.sh -f general/stable/vnode3.sim

+#./test.sh -f general/stream/metrics_1.sim
+#./test.sh -f general/stream/metrics_del.sim
+#./test.sh -f general/stream/metrics_n.sim
+#./test.sh -f general/stream/metrics_replica1_vnoden.sim
+#./test.sh -f general/stream/new_stream.sim
+#./test.sh -f general/stream/restart_stream.sim
+#./test.sh -f general/stream/stream_1.sim
+#./test.sh -f general/stream/stream_2.sim
+#./test.sh -f general/stream/stream_3.sim
+#./test.sh -f general/stream/stream_restart.sim
+#./test.sh -f general/stream/table_1.sim
+#./test.sh -f general/stream/table_del.sim
+#./test.sh -f general/stream/table_n.sim
+#./test.sh -f general/stream/table_replica1_vnoden.sim
+
 ./test.sh -f general/table/autocreate.sim
 ./test.sh -f general/table/basic1.sim
 ./test.sh -f general/table/basic2.sim
@@ -70,9 +70,9 @@ run general/http/restful_insert.sim
 run general/http/restful_limit.sim
 run general/http/restful_full.sim
 run general/http/prepare.sim
-# run general/http/telegraf.sim
-# run general/http/grafana_bug.sim
-# run general/http/grafana.sim
+run general/http/telegraf.sim
+run general/http/grafana_bug.sim
+run general/http/grafana.sim
 run general/import/basic.sim
 run general/import/commit.sim
 run general/import/large.sim
@@ -102,32 +102,32 @@ run general/parser/import_commit1.sim
 run general/parser/import_commit2.sim
 run general/parser/import_commit3.sim
 run general/parser/insert_tb.sim
-# run general/parser/first_last.sim
+run general/parser/first_last.sim
 #unsupport run general/parser/import_file.sim
-# run general/parser/lastrow.sim
+run general/parser/lastrow.sim
 run general/parser/nchar.sim
 #unsupport run general/parser/null_char.sim
-# run general/parser/single_row_in_tb.sim
+run general/parser/single_row_in_tb.sim
 run general/parser/select_from_cache_disk.sim
-# run general/parser/limit.sim
+run general/parser/limit.sim
 # run general/parser/limit1.sim
-# run general/parser/limit1_tblocks100.sim
-# run general/parser/mixed_blocks.sim
+run general/parser/limit1_tblocks100.sim
+run general/parser/mixed_blocks.sim
 # run general/parser/selectResNum.sim
 run general/parser/select_across_vnodes.sim
 run general/parser/slimit1.sim
 run general/parser/tbnameIn.sim
 run general/parser/binary_escapeCharacter.sim
-# run general/parser/projection_limit_offset.sim
+run general/parser/projection_limit_offset.sim
 run general/parser/limit2.sim
-# run general/parser/slimit.sim
+run general/parser/slimit.sim
 run general/parser/fill.sim
-# run general/parser/fill_stb.sim
+run general/parser/fill_stb.sim
 # run general/parser/interp.sim
 # run general/parser/where.sim
 #unsupport run general/parser/join.sim
 #unsupport run general/parser/join_multivnode.sim
-# run general/parser/select_with_tags.sim
+run general/parser/select_with_tags.sim
 #unsupport run general/parser/groupby.sim
 #unsupport run general/parser/bug.sim
 #unsupport run general/parser/tags_dynamically_specifiy.sim
@@ -142,7 +142,7 @@ run general/stable/dnode3.sim
 run general/stable/metrics.sim
 run general/stable/values.sim
 run general/stable/vnode3.sim
-# run general/table/autocreate.sim
+run general/table/autocreate.sim
 run general/table/basic1.sim
 run general/table/basic2.sim
 run general/table/basic3.sim
@@ -166,7 +166,7 @@ run general/table/int.sim
 run general/table/limit.sim
 run general/table/smallint.sim
 run general/table/table_len.sim
-# run general/table/table.sim
+run general/table/table.sim
 run general/table/tinyint.sim
 run general/table/vgroup.sim
 run general/tag/3.sim
@@ -214,6 +214,7 @@ run general/vector/table_time.sim
 run general/stream/stream_1.sim
 run general/stream/stream_2.sim
 run general/stream/stream_3.sim
+run general/stream/stream_restart.sim
 run general/stream/table_1.sim
 run general/stream/metrics_1.sim
 run general/stream/table_n.sim
@@ -41,7 +41,7 @@ if [ "$2" != "python" ]; then
 elif [ "$1" == "full" ]; then
   echo "### run TSIM full test ###"
   runSimCaseOneByOne fullGeneralSuite.sim
-else
+elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
   echo "### run TSIM smoke test ###"
   runSimCaseOneByOne basicSuite.sim
 fi
@@ -77,7 +77,7 @@ if [ "$2" != "sim" ]; then
 elif [ "$1" == "full" ]; then
   echo "### run Python full test ###"
   runPyCaseOneByOne fulltest.sh
-else
+elif [ "$1" == "smoke" ] || [ -z "$1" ]; then
   echo "### run Python smoke test ###"
   runPyCaseOneByOne smoketest.sh
 fi