Merge remote-tracking branch 'origin/3.0' into feature/dnode

commit fe7f246ccd by Shengliang Guan, 2022-04-29 10:54:00 +08:00
78 changed files with 2627 additions and 1598 deletions

View File

@ -228,7 +228,7 @@ endif()
# iconv
if(${BUILD_WITH_ICONV})
add_subdirectory(iconv EXCLUDE_FROM_ALL)
add_library(iconv STATIC iconv/win_iconv.c)
endif(${BUILD_WITH_ICONV})
# wingetopt

View File

@ -25,7 +25,7 @@ int32_t init_env() {
return -1;
}
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2");
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
@ -78,11 +78,14 @@ int32_t create_stream() {
taos_free_result(pRes);
/*const char* sql = "select min(k), max(k), sum(k) from tu1";*/
const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";
/*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
pRes = tmq_create_stream(pConn, "stream1", "out1", sql);
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes = taos_query(
pConn,
"create stream stream1 trigger window_close as select min(k), max(k), sum(k) as sum_of_k from tu1 interval(10m)");
if (taos_errno(pRes) != 0) {
printf("failed to create stream out1, reason:%s\n", taos_errstr(pRes));
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
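Both init_env() and create_stream() above repeat the same taos_query / taos_errno / taos_errstr / taos_free_result sequence. A minimal sketch of a wrapper that factors it out is shown below; the helper name exec_sql is made up for illustration and it relies only on client calls already used in this example.

#include "taos.h"
#include <stdio.h>

static int32_t exec_sql(TAOS* pConn, const char* sql) {
  // run the statement, report any error, then release the result set
  TAOS_RES* pRes = taos_query(pConn, sql);
  int32_t   code = taos_errno(pRes);
  if (code != 0) {
    printf("failed to exec sql: %s, reason:%s\n", sql, taos_errstr(pRes));
  }
  taos_free_result(pRes);
  return code;
}

/* usage, mirroring the stream creation above:
 *   if (exec_sql(pConn, "create stream stream1 trigger window_close as select min(k), max(k), "
 *                       "sum(k) as sum_of_k from tu1 interval(10m)") != 0) return -1;
 */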

View File

@ -86,9 +86,9 @@ typedef struct taosField {
} TAOS_FIELD;
#ifdef WINDOWS
#define DLL_EXPORT __declspec(dllexport)
#define DLL_EXPORT __declspec(dllexport)
#else
#define DLL_EXPORT
#define DLL_EXPORT
#endif
typedef void (*__taos_async_fn_t)(void *param, TAOS_RES *, int code);
@ -123,42 +123,42 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int taos_init(void);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
DLL_EXPORT TAOS *taos_connect_l(const char *ip, int ipLen, const char *user, int userLen, const char *pass, int passLen,
DLL_EXPORT TAOS *taos_connect_l(const char *ip, int ipLen, const char *user, int userLen, const char *pass, int passLen,
const char *db, int dbLen, uint16_t port);
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
const char *taos_data_type(int type);
const char *taos_data_type(int type);
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags);
DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name);
DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name);
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags);
DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name);
DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name);
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind);
DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind);
DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx);
DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt);
DLL_EXPORT char *taos_stmt_errstr(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_affected_rows(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert);
DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums);
DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes);
DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind);
DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind);
DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx);
DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt);
DLL_EXPORT char *taos_stmt_errstr(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_affected_rows(TAOS_STMT *stmt);
DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt);
DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
DLL_EXPORT TAOS_RES *taos_query_l(TAOS *taos, const char *sql, int sqlLen);
DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
DLL_EXPORT TAOS_RES *taos_query_l(TAOS *taos, const char *sql, int sqlLen);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result
DLL_EXPORT void taos_free_result(TAOS_RES *res);
DLL_EXPORT int taos_field_count(TAOS_RES *res);
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
DLL_EXPORT int taos_affected_rows(TAOS_RES *res);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result
DLL_EXPORT void taos_free_result(TAOS_RES *res);
DLL_EXPORT int taos_field_count(TAOS_RES *res);
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
DLL_EXPORT int taos_affected_rows(TAOS_RES *res);
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
@ -271,10 +271,8 @@ DLL_EXPORT int64_t tmq_get_response_offset(tmq_message_t *message);
/* --------------------TEMPORARY INTERFACE FOR TESTING--------------------- */
#if 0
DLL_EXPORT TAOS_RES *tmq_create_topic(TAOS *taos, const char *name, const char *sql, int sqlLen);
#endif
DLL_EXPORT TAOS_RES *tmq_create_stream(TAOS *taos, const char *streamName, const char *tbName, const char *sql);
#endif
/* ------------------------------ TMQ END -------------------------------- */
#if 1 // Shuduo: temporary enable for app build
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);
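The taos_stmt_* declarations above are the prepared-statement surface of the client. The sketch below shows the basic call order under the assumption that inspecting the parsed statement (without binding parameters) is all that is needed; it uses only functions declared in this header.

#include "taos.h"
#include <stdio.h>
#include <string.h>

static int32_t stmt_inspect(TAOS *taos, const char *sql) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  if (stmt == NULL) return -1;
  if (taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql)) != 0) {
    printf("prepare failed: %s\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return -1;
  }
  int isInsert = 0, numParams = 0;
  taos_stmt_is_insert(stmt, &isInsert);    // non-zero if the statement is an insert
  taos_stmt_num_params(stmt, &numParams);  // number of '?' placeholders
  printf("insert=%d, params=%d\n", isInsert, numParams);
  return taos_stmt_close(stmt);
}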

include/common/systable.h (new file, 78 lines)
View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "os.h"
#ifndef TDENGINE_SYSTABLE_H
#define TDENGINE_SYSTABLE_H
#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_INS_TABLE_DNODES "dnodes"
#define TSDB_INS_TABLE_MNODES "mnodes"
#define TSDB_INS_TABLE_MODULES "modules"
#define TSDB_INS_TABLE_QNODES "qnodes"
#define TSDB_INS_TABLE_BNODES "bnodes"
#define TSDB_INS_TABLE_SNODES "snodes"
#define TSDB_INS_TABLE_CLUSTER "cluster"
#define TSDB_INS_TABLE_USER_DATABASES "user_databases"
#define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions"
#define TSDB_INS_TABLE_USER_INDEXES "user_indexes"
#define TSDB_INS_TABLE_USER_STABLES "user_stables"
#define TSDB_INS_TABLE_USER_STREAMS "user_streams"
#define TSDB_INS_TABLE_USER_TABLES "user_tables"
#define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed"
#define TSDB_INS_TABLE_USER_USERS "user_users"
#define TSDB_INS_TABLE_LICENCES "grants"
#define TSDB_INS_TABLE_VGROUPS "vgroups"
#define TSDB_INS_TABLE_VNODES "vnodes"
#define TSDB_INS_TABLE_CONFIGS "configs"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "smas"
#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes"
#define TSDB_PERFS_TABLE_CONNECTIONS "connections"
#define TSDB_PERFS_TABLE_QUERIES "queries"
#define TSDB_PERFS_TABLE_TOPICS "topics"
#define TSDB_PERFS_TABLE_CONSUMERS "consumers"
#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "offsets"
#define TSDB_PERFS_TABLE_TRANS "trans"
#define TSDB_PERFS_TABLE_STREAMS "streams"
typedef struct SSysDbTableSchema {
const char *name;
const int32_t type;
const int32_t bytes;
} SSysDbTableSchema;
typedef struct SSysTableMeta {
const char *name;
const SSysDbTableSchema *schema;
const int32_t colNum;
} SSysTableMeta;
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size);
void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_SYSTABLE_H
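A hedged usage sketch for the two accessors declared above: it walks every information_schema table description and prints its columns, using only the types defined in this header.

#include <stdio.h>
#include "systable.h"

static void dump_infos_schema(void) {
  const SSysTableMeta *pMeta = NULL;
  size_t               num = 0;
  getInfosDbMeta(&pMeta, &num);
  for (size_t i = 0; i < num; ++i) {
    printf("%s: %d columns\n", pMeta[i].name, pMeta[i].colNum);
    for (int32_t c = 0; c < pMeta[i].colNum; ++c) {
      printf("  %s (type %d, %d bytes)\n", pMeta[i].schema[c].name, pMeta[i].schema[c].type,
             pMeta[i].schema[c].bytes);
    }
  }
}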

View File

@ -65,14 +65,12 @@ typedef struct SDataBlockInfo {
STimeWindow window;
int32_t rows;
int32_t rowSize;
union {
int64_t uid; // from which table of uid, comes from this data block
int64_t blockId;
};
uint64_t groupId; // no need to serialize
int16_t numOfCols;
int16_t hasVarCol;
int16_t capacity;
int64_t uid; // the uid of table, from which current data block comes
int64_t blockId; // block id, generated by physical planner
uint64_t groupId; // no need to serialize
int16_t numOfCols;
int16_t hasVarCol;
int16_t capacity;
} SDataBlockInfo;
typedef struct SSDataBlock {

View File

@ -1038,6 +1038,7 @@ typedef struct {
int8_t compressed;
int32_t compLen;
int32_t numOfRows;
int32_t numOfCols;
char data[];
} SRetrieveTableRsp;
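SRetrieveTableRsp now also carries numOfCols and still ends in a flexible array member (char data[]), so an allocation has to add the payload length to sizeof(SRetrieveTableRsp). The generic sketch below only illustrates that sizing rule; it assumes an uncompressed payload and ignores the byte-order/serialization handling of the real message path.

#include <stdlib.h>
#include <string.h>

static SRetrieveTableRsp *alloc_retrieve_rsp(const void *payload, int32_t payloadLen,
                                             int32_t numOfRows, int32_t numOfCols) {
  SRetrieveTableRsp *pRsp = calloc(1, sizeof(SRetrieveTableRsp) + payloadLen);
  if (pRsp == NULL) return NULL;
  pRsp->numOfRows = numOfRows;  // fields not shown in the hunk stay zeroed
  pRsp->numOfCols = numOfCols;
  pRsp->compLen   = payloadLen;
  memcpy(pRsp->data, payload, payloadLen);
  return pRsp;
}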
@ -1473,7 +1474,6 @@ _err:
// this message is sent from mnode to mnode(read thread to write thread), so there is no need for serialization or
// deserialization
typedef struct {
int8_t* mqInReb;
SHashObj* rebSubHash; // SHashObj<key, SMqRebSubscribe>
} SMqDoRebalanceMsg;

View File

@ -595,6 +595,34 @@ static FORCE_INLINE int32_t tdSRowSetInfo(SRowBuilder *pBuilder, int32_t nCols,
return TSDB_CODE_SUCCESS;
}
/**
* @brief Set the tuple-row (STpRow) info for the row builder.
*
* @param pBuilder
* @param nCols
* @param flen
* @return int32_t TSDB_CODE_SUCCESS, or TSDB_CODE_INVALID_PARA on invalid arguments
*/
static FORCE_INLINE int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen) {
pBuilder->flen = flen;
pBuilder->nCols = nCols;
if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) {
TASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
#ifdef TD_SUPPORT_BITMAP
// the primary TS key is stored separately
pBuilder->nBitmaps = (int16_t)TD_BITMAP_BYTES(pBuilder->nCols - 1);
#else
pBuilder->nBitmaps = 0;
pBuilder->nBoundBitmaps = 0;
#endif
return TSDB_CODE_SUCCESS;
}
/**
* @brief To judge row type: STpRow/SKvRow
*
@ -1376,7 +1404,7 @@ static void tdSRowPrint(STSRow *row, STSchema *pSchema, const char* tag) {
printf("%s >>>", tag);
for (int i = 0; i < pSchema->numOfCols; ++i) {
STColumn *stCol = pSchema->columns + i;
SCellVal sVal = {.valType = 255, .val = NULL};
SCellVal sVal = { 255, NULL};
if (!tdSTSRowIterNext(&iter, stCol->colId, stCol->type, &sVal)) {
break;
}
@ -1385,7 +1413,6 @@ static void tdSRowPrint(STSRow *row, STSchema *pSchema, const char* tag) {
}
printf("\n");
}
#ifdef TROW_ORIGIN_HZ
typedef struct {
uint32_t nRows;

View File

@ -45,6 +45,7 @@ typedef struct SInputData {
typedef struct SOutputData {
int32_t numOfRows;
int32_t numOfCols;
int8_t compressed;
char* pData;
bool queryEnd;

View File

@ -33,6 +33,7 @@ typedef struct SReadHandle {
void* meta;
void* config;
void* vnode;
void* mnd;
} SReadHandle;
#define STREAM_DATA_TYPE_SUBMIT_BLOCK 0x1

View File

@ -165,7 +165,7 @@ typedef struct SInputColumnInfoData {
SColumnInfoData *pPTS; // primary timestamp column
SColumnInfoData **pData;
SColumnDataAgg **pColumnDataAgg;
uint64_t uid; // table uid
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
// sql function runtime context

View File

@ -40,6 +40,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_STDDEV,
FUNCTION_TYPE_SUM,
FUNCTION_TYPE_TWA,
FUNCTION_TYPE_HISTOGRAM,
// nonstandard SQL function
FUNCTION_TYPE_BOTTOM = 500,

View File

@ -35,19 +35,6 @@ enum {
STREAM_CREATED_BY__SMA,
};
#if 0
// pipe -> fetch/pipe queue
// merge -> merge queue
// write -> write queue
enum {
TASK_DISPATCH_MSG__SND_PIPE = 1,
TASK_DISPATCH_MSG__SND_MERGE,
TASK_DISPATCH_MSG__VND_PIPE,
TASK_DISPATCH_MSG__VND_MERGE,
TASK_DISPATCH_MSG__VND_WRITE,
};
#endif
typedef struct {
int32_t nodeId; // 0 for snode
SEpSet epSet;
@ -100,10 +87,6 @@ typedef struct {
int8_t reserved;
} STaskSinkFetch;
typedef struct {
int8_t reserved;
} STaskSinkShow;
enum {
TASK_SOURCE__SCAN = 1,
TASK_SOURCE__PIPE,
@ -128,7 +111,6 @@ enum {
TASK_SINK__TABLE,
TASK_SINK__SMA,
TASK_SINK__FETCH,
TASK_SINK__SHOW,
};
typedef struct {
@ -155,7 +137,6 @@ typedef struct {
STaskSinkTb tbSink;
STaskSinkSma smaSink;
STaskSinkFetch fetchSink;
STaskSinkShow showSink;
};
// dispatch

View File

@ -97,43 +97,6 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_TIME_PRECISION_MICRO_DIGITS 16
#define TSDB_TIME_PRECISION_NANO_DIGITS 19
#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_INS_TABLE_DNODES "dnodes"
#define TSDB_INS_TABLE_MNODES "mnodes"
#define TSDB_INS_TABLE_MODULES "modules"
#define TSDB_INS_TABLE_QNODES "qnodes"
#define TSDB_INS_TABLE_BNODES "bnodes"
#define TSDB_INS_TABLE_SNODES "snodes"
#define TSDB_INS_TABLE_CLUSTER "cluster"
#define TSDB_INS_TABLE_USER_DATABASES "user_databases"
#define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions"
#define TSDB_INS_TABLE_USER_INDEXES "user_indexes"
#define TSDB_INS_TABLE_USER_STABLES "user_stables"
#define TSDB_INS_TABLE_USER_STREAMS "user_streams"
#define TSDB_INS_TABLE_USER_TABLES "user_tables"
#define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed"
#define TSDB_INS_TABLE_USER_USERS "user_users"
#define TSDB_INS_TABLE_LICENCES "grants"
#define TSDB_INS_TABLE_VGROUPS "vgroups"
#define TSDB_INS_TABLE_CONSUMERS "consumers"
#define TSDB_INS_TABLE_SUBSCRIBES "subscribes"
#define TSDB_INS_TABLE_TRANS "trans"
#define TSDB_INS_TABLE_SMAS "smas"
#define TSDB_INS_TABLE_CONFIGS "configs"
#define TSDB_INS_TABLE_CONNS "connections"
#define TSDB_INS_TABLE_QUERIES "queries"
#define TSDB_INS_TABLE_VNODES "vnodes"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_CONNECTIONS "connections"
#define TSDB_PERFS_TABLE_QUERIES "queries"
#define TSDB_PERFS_TABLE_TOPICS "topics"
#define TSDB_PERFS_TABLE_CONSUMERS "consumers"
#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "offsets"
#define TSDB_PERFS_TABLE_STREAMS "streams"
#define TSDB_INDEX_TYPE_SMA "SMA"
#define TSDB_INDEX_TYPE_FULLTEXT "FULLTEXT"
@ -205,16 +168,6 @@ typedef enum ELogicConditionType {
LOGIC_COND_TYPE_NOT,
} ELogicConditionType;
#define FUNCTION_CEIL 4500
#define FUNCTION_FLOOR 4501
#define FUNCTION_ABS 4502
#define FUNCTION_ROUND 4503
#define FUNCTION_LENGTH 4800
#define FUNCTION_CONCAT 4801
#define FUNCTION_LTRIM 4802
#define FUNCTION_RTRIM 4803
#define TSDB_NAME_DELIMITER_LEN 1
#define TSDB_UNI_LEN 24
@ -417,21 +370,9 @@ typedef enum ELogicConditionType {
* 1. ordinary sub query for select * from super_table
* 2. all sqlobj generated by createSubqueryObj with this flag
*/
#define TSDB_QUERY_TYPE_SUBQUERY 0x02u
#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table
#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side
#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table
#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query
#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... query
#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80u // join sub query at the second stage
#define TSDB_QUERY_TYPE_TAG_FILTER_QUERY 0x400u
#define TSDB_QUERY_TYPE_INSERT 0x100u // insert type
#define TSDB_QUERY_TYPE_MULTITABLE_QUERY 0x200u
#define TSDB_QUERY_TYPE_FILE_INSERT 0x400u // insert data from file
#define TSDB_QUERY_TYPE_STMT_INSERT 0x800u // stmt insert type
#define TSDB_QUERY_TYPE_NEST_SUBQUERY 0x1000u // nested sub query
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
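TSDB_QUERY_HAS_TYPE and TSDB_QUERY_SET_TYPE are plain bit-flag helpers. A tiny sketch with a made-up flag value (EXAMPLE_QUERY_FLAG is not part of the header) shows the intended usage:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_QUERY_FLAG 0x08u  // hypothetical flag value, for illustration only

static void query_type_flag_demo(void) {
  uint32_t qtype = 0;
  TSDB_QUERY_SET_TYPE(qtype, EXAMPLE_QUERY_FLAG);
  assert(TSDB_QUERY_HAS_TYPE(qtype, EXAMPLE_QUERY_FLAG));  // bit is now set
  assert(!TSDB_QUERY_HAS_TYPE(qtype, 0x10u));              // other bits unaffected
}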

View File

@ -667,7 +667,7 @@ tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
if (code != 0) goto FAIL;
while (TSDB_CODE_MND_CONSUMER_NOT_READY == tmqAskEp(tmq, false)) {
tscDebug("not ready, retry");
tscDebug("consumer not ready, retry");
taosMsleep(500);
}
@ -693,6 +693,7 @@ void tmq_conf_set_offset_commit_cb(tmq_conf_t* conf, tmq_commit_cb* cb) {
conf->commitCb = cb;
}
#if 0
TAOS_RES* tmq_create_stream(TAOS* taos, const char* streamName, const char* tbName, const char* sql) {
STscObj* pTscObj = (STscObj*)taos;
SRequestObj* pRequest = NULL;
@ -777,6 +778,7 @@ _return:
return pRequest;
}
#endif
#if 0
int32_t tmqGetSkipLogNum(tmq_message_t* tmq_message) {

View File

@ -659,10 +659,15 @@ TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
TAOS_RES* pRes = taos_query(pConn, "use abc1");
TAOS_RES* pRes = taos_query(pConn, "use db");
if (taos_errno(pRes) != 0) {
printf("failed to use db, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);
ASSERT_TRUE(false);
}
taos_free_result(pRes);
pRes = taos_query(pConn, "select now() from m1");
pRes = taos_query(pConn, "select tbname from st1");
if (taos_errno(pRes) != 0) {
printf("failed to select from table, reason:%s\n", taos_errstr(pRes));
taos_free_result(pRes);

View File

@ -0,0 +1,340 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "systable.h"
#include "tdef.h"
#include "types.h"
#include "taos.h"
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
static const SSysDbTableSchema dnodesSchema[] = {
{.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "max_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema mnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema modulesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema qnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema snodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema bnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema clusterSchema[] = {
{.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema userDBSchema[] = {
{.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "strict", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "duration", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "keep", .bytes = 24 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wal", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "fsync", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "cachelast", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
// {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update
};
static const SSysDbTableSchema userFuncSchema[] = {
{.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SSysDbTableSchema userIdxSchema[] = {
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_database", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "column_name", .bytes = SYSTABLE_SCH_COL_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_type", .bytes = 10, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_extensions", .bytes = 256, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userStbsSchema[] = {
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userStreamsSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userTblsSchema[] = {
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "table_comment", .bytes = 512 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "type", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userTblDistSchema[] = {
{.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE},
{.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SSysDbTableSchema userUsersSchema[] = {
{.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "privilege", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema grantsSchema[] = {
{.name = "version", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "expire time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "storage(GB)", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cpu cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "speed(PPS)", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema vgroupsSchema[] = {
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SSysDbTableSchema smaSchema[] = {
{.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema transSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema configSchema[] = {
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "value", .bytes = TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
{TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
{TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
{TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
{TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
{TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
{TSDB_INS_TABLE_USER_STREAMS, userStreamsSchema, tListLen(userStreamsSchema)},
{TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
{TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
{TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)},
{TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)},
};
static const SSysDbTableSchema connectionsSchema[] = {
{.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "program", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema topicSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
// TODO config
};
static const SSysDbTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "app_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "topics", .bytes = TSDB_SHOW_LIST_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
};
static const SSysDbTableSchema offsetSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
};
static const SSysDbTableSchema querySchema[] = {
{.name = "query_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "qid", .bytes = 22 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "time", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "sql_obj_id", .bytes = QUERY_OBJ_ID_SIZE + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.name = "sub_queries", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "sub_query_info", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
{TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)},
{TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)},
{TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)},
{TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
{TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
};
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {
*pInfosTableMeta = infosMeta;
*size = tListLen(infosMeta);
}
void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) {
*pPerfsTableMeta = perfsMeta;
*size = tListLen(perfsMeta);
}

View File

@ -44,6 +44,11 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw);
int32_t mndSetConsumerCommitLogs(SMnode *pMnode, STrans *pTrans, SMqConsumerObj *pConsumer);
bool mndRebTryStart();
void mndRebEnd();
void mndRebCntInc();
void mndRebCntDec();
#ifdef __cplusplus
}
#endif

View File

@ -388,15 +388,12 @@ typedef struct {
int8_t type;
int8_t replica;
int16_t numOfColumns;
int32_t rowSize;
int32_t numOfRows;
void* pIter;
SMnode* pMnode;
STableMetaRsp* pMeta;
bool sysDbRsp;
char db[TSDB_DB_FNAME_LEN];
int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS];
} SShowObj;
typedef struct {

View File

@ -22,18 +22,6 @@
extern "C" {
#endif
typedef struct SInfosTableSchema {
const char *name;
const int32_t type;
const int32_t bytes;
} SInfosTableSchema;
typedef struct SInfosTableMeta {
const char *name;
const SInfosTableSchema *schema;
const int32_t colNum;
} SInfosTableMeta;
int32_t mndInitInfos(SMnode *pMnode);
void mndCleanupInfos(SMnode *pMnode);
int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp);

View File

@ -22,18 +22,6 @@
extern "C" {
#endif
typedef struct SPerfsTableSchema {
char *name;
int32_t type;
int32_t bytes;
} SPerfsTableSchema;
typedef struct SPerfsTableMeta {
char *name;
const SPerfsTableSchema *schema;
int32_t colNum;
} SPerfsTableMeta;
int32_t mndBuildPerfsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp);
int32_t mndInitPerfs(SMnode *pMnode);
void mndCleanupPerfs(SMnode *pMnode);

View File

@ -17,6 +17,7 @@
#define _TD_MND_SHOW_H_
#include "mndInt.h"
#include "systable.h"
#ifdef __cplusplus
extern "C" {

View File

@ -36,8 +36,8 @@ typedef struct {
typedef enum {
TEST_TRANS_START_FUNC = 1,
TEST_TRANS_STOP_FUNC = 2,
CONSUME_TRANS_START_FUNC = 3,
CONSUME_TRANS_STOP_FUNC = 4,
MQ_REB_TRANS_START_FUNC = 3,
MQ_REB_TRANS_STOP_FUNC = 4,
} ETrnFuncType;
typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);

View File

@ -453,7 +453,7 @@ static int32_t mndRetrieveBnodes(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->id, false);
char buf[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(buf, pObj->pDnode->ep, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, buf, false);

View File

@ -194,7 +194,7 @@ static int32_t mndRetrieveClusters(SNodeMsg *pMsg, SShowObj *pShow, SSDataBlock
colDataAppend(pColInfo, numOfRows, (const char*) &pCluster->id, false);
char buf[tListLen(pCluster->name) + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(buf, pCluster->name, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf, pCluster->name, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, buf, false);

View File

@ -35,7 +35,7 @@
#define MND_CONSUMER_LOST_HB_CNT 3
static int8_t mqInRebFlag = 0;
static int8_t mqRebLock = 0;
static const char *mndConsumerStatusName(int status);
@ -75,6 +75,17 @@ int32_t mndInitConsumer(SMnode *pMnode) {
void mndCleanupConsumer(SMnode *pMnode) {}
bool mndRebTryStart() {
int8_t old = atomic_val_compare_exchange_8(&mqRebLock, 0, 1);
return old == 0;
}
void mndRebEnd() { atomic_sub_fetch_8(&mqRebLock, 1); }
void mndRebCntInc() { atomic_add_fetch_8(&mqRebLock, 1); }
void mndRebCntDec() { atomic_sub_fetch_8(&mqRebLock, 1); }
static int32_t mndProcessConsumerLostMsg(SNodeMsg *pMsg) {
SMnode *pMnode = pMsg->pNode;
SMqConsumerLostMsg *pLostMsg = pMsg->rpcMsg.pCont;
@ -143,8 +154,7 @@ static int32_t mndProcessMqTimerMsg(SNodeMsg *pMsg) {
void *pIter = NULL;
// rebalance cannot be parallel
int8_t old = atomic_val_compare_exchange_8(&mqInRebFlag, 0, 1);
if (old != 0) {
if (!mndRebTryStart()) {
mInfo("mq rebalance already in progress, do nothing");
return 0;
}
@ -152,7 +162,6 @@ static int32_t mndProcessMqTimerMsg(SNodeMsg *pMsg) {
SMqDoRebalanceMsg *pRebMsg = rpcMallocCont(sizeof(SMqDoRebalanceMsg));
pRebMsg->rebSubHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK);
// TODO set cleanfp
pRebMsg->mqInReb = &mqInRebFlag;
// iterate all consumers, find all modification
while (1) {
@ -223,7 +232,7 @@ static int32_t mndProcessMqTimerMsg(SNodeMsg *pMsg) {
taosHashCleanup(pRebMsg->rebSubHash);
rpcFreeCont(pRebMsg);
mTrace("mq rebalance finished, no modification");
atomic_store_8(&mqInRebFlag, 0);
mndRebEnd();
}
return 0;
}
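mndRebTryStart/mndRebEnd above form a simple try-lock around rebalancing: compare-and-swap 0 -> 1 to enter, decrement to leave, so a concurrent timer message backs off instead of starting a second rebalance. A generic C11 sketch of the same pattern (using stdatomic rather than TDengine's atomic_* wrappers) for illustration:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int rebLock;  // 0 = idle, > 0 = rebalance in progress

static bool reb_try_start(void) {
  int expected = 0;
  // only the caller that swaps 0 -> 1 wins; everyone else backs off
  return atomic_compare_exchange_strong(&rebLock, &expected, 1);
}

static void reb_end(void) { atomic_fetch_sub(&rebLock, 1); }

/* usage, mirroring mndProcessMqTimerMsg above:
 *   if (!reb_try_start()) return 0;  // rebalance already running
 *   ... build and dispatch SMqDoRebalanceMsg ...
 *   reb_end();
 */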

View File

@ -23,6 +23,7 @@
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"
#include "systable.h"
#define DB_VER_NUMBER 1
#define DB_RESERVE_SIZE 64
@ -1192,7 +1193,7 @@ static int32_t mndProcessUseDbReq(SNodeMsg *pReq) {
}
char *p = strchr(usedbReq.db, '.');
if (p && 0 == strcmp(p + 1, TSDB_INFORMATION_SCHEMA_DB)) {
if (p && ((0 == strcmp(p + 1, TSDB_INFORMATION_SCHEMA_DB) || (0 == strcmp(p + 1, TSDB_PERFORMANCE_SCHEMA_DB))))) {
memcpy(usedbRsp.db, usedbReq.db, TSDB_DB_FNAME_LEN);
int32_t vgVersion = mndGetGlobalVgroupVersion(pMnode);
if (usedbReq.vgVersion < vgVersion) {
@ -1386,12 +1387,13 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
bool sysDb) {
int32_t cols = 0;
char *buf = taosMemoryMalloc(pShow->bytes[cols]);
int32_t bytes = pShow->pMeta->pSchemas[cols].bytes;
char *buf = taosMemoryMalloc(bytes);
const char *name = mndGetDbStr(pDb->name);
if (name != NULL) {
STR_WITH_MAXSIZE_TO_VARSTR(buf, name, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf, name, bytes);
} else {
STR_WITH_MAXSIZE_TO_VARSTR(buf, "NULL", pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf, "NULL", bytes);
}
char *status = "ready";
@ -1429,7 +1431,6 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.replications, false);
const char *src = pDb->cfg.strict ? "strict" : "nostrict";
char b[9 + VARSTR_HEADER_SIZE] = {0};
STR_WITH_SIZE_TO_VARSTR(b, src, strlen(src));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)b, false);
@ -1499,8 +1500,10 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)t, false);
// single stable model
int8_t m = 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.numOfStables, false);
colDataAppend(pColInfo, rows, (const char *)&m, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
colDataAppend(pColInfo, rows, (const char *)b, false);
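Several retrieve/dump functions in this commit replace pShow->bytes[cols] with pShow->pMeta->pSchemas[cols].bytes when filling VARSTR columns. The sketch below (the helper name appendVarStrCol is invented for illustration) captures that repeated buffer/varstr/append sequence using only calls that appear in the hunks above, plus taosMemoryFree as the assumed counterpart of taosMemoryCalloc.

// illustrative helper only: take the column width from the show schema,
// wrap the source string as a VARSTR, and append it to the current column
static void appendVarStrCol(SSDataBlock *pBlock, SShowObj *pShow, int32_t row,
                            int32_t *cols, const char *src) {
  int32_t bytes = pShow->pMeta->pSchemas[*cols].bytes;
  char   *buf = taosMemoryCalloc(1, bytes);
  STR_WITH_MAXSIZE_TO_VARSTR(buf, src, bytes);
  SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, (*cols)++);
  colDataAppend(pColInfo, row, buf, false);
  taosMemoryFree(buf);
}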

View File

@ -418,6 +418,9 @@ int32_t tEncodeSStreamObj(SCoder *pEncoder, const SStreamObj *pObj) {
if (tEncodeI32(pEncoder, pObj->version) < 0) return -1;
if (tEncodeI8(pEncoder, pObj->status) < 0) return -1;
if (tEncodeI8(pEncoder, pObj->createdBy) < 0) return -1;
if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1;
if (tEncodeI32(pEncoder, pObj->triggerParam) < 0) return -1;
if (tEncodeI64(pEncoder, pObj->waterMark) < 0) return -1;
if (tEncodeI32(pEncoder, pObj->fixedSinkVgId) < 0) return -1;
if (tEncodeI64(pEncoder, pObj->smaId) < 0) return -1;
if (tEncodeCStr(pEncoder, pObj->sql) < 0) return -1;
@ -464,6 +467,9 @@ int32_t tDecodeSStreamObj(SCoder *pDecoder, SStreamObj *pObj) {
if (tDecodeI32(pDecoder, &pObj->version) < 0) return -1;
if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1;
if (tDecodeI8(pDecoder, &pObj->createdBy) < 0) return -1;
if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1;
if (tDecodeI32(pDecoder, &pObj->triggerParam) < 0) return -1;
if (tDecodeI64(pDecoder, &pObj->waterMark) < 0) return -1;
if (tDecodeI32(pDecoder, &pObj->fixedSinkVgId) < 0) return -1;
if (tDecodeI64(pDecoder, &pObj->smaId) < 0) return -1;
if (tDecodeCStrAlloc(pDecoder, &pObj->sql) < 0) return -1;

View File

@ -705,7 +705,7 @@ static int32_t mndRetrieveDnodes(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
colDataAppend(pColInfo, numOfRows, (const char *)&pDnode->id, false);
char buf[tListLen(pDnode->ep) + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, buf, false);

View File

@ -517,14 +517,14 @@ static int32_t mndRetrieveFuncs(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
cols = 0;
char b1[tListLen(pFunc->name) + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(b1, pFunc->name, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(b1, pFunc->name, pShow->pMeta->pSchemas[cols].bytes);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b1, false);
if (pFunc->pComment) {
char *b2 = taosMemoryCalloc(1, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(b2, pFunc->pComment, pShow->bytes[cols]);
char *b2 = taosMemoryCalloc(1, pShow->pMeta->pSchemas[cols].bytes);
STR_WITH_MAXSIZE_TO_VARSTR(b2, pFunc->pComment, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
@ -540,7 +540,7 @@ static int32_t mndRetrieveFuncs(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
char b3[TSDB_TYPE_STR_MAX_LEN] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(b3, mnodeGenTypeStr(buf, TSDB_TYPE_STR_MAX_LEN, pFunc->outputType, pFunc->outputLen),
pShow->bytes[cols]);
pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b3, false);

View File

@ -14,255 +14,10 @@
*/
#define _DEFAULT_SOURCE
#include "mndInfoSchema.h"
#include "systable.h"
#include "mndInt.h"
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
static const SInfosTableSchema dnodesSchema[] = {
{.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "max_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema mnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema modulesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema qnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema snodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema bnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema clusterSchema[] = {
{.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema userDBSchema[] = {
{.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "replica", .bytes = 2, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "strict", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "duration", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "keep", .bytes = 24 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wal", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "fsync", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "cachelast", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
// {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update
};
static const SInfosTableSchema userFuncSchema[] = {
{.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema userIdxSchema[] = {
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_database", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "column_name", .bytes = SYSTABLE_SCH_COL_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_type", .bytes = 10, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "index_extensions", .bytes = 256, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema userStbsSchema[] = {
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema userTblsSchema[] = {
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "table_comment", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "type", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema userTblDistSchema[] = {
{.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE},
{.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema userUsersSchema[] = {
{.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "privilege", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema grantsSchema[] = {
{.name = "version", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "expire time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "storage(GB)", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cpu cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "speed(PPS)", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema vgroupsSchema[] = {
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema smaSchema[] = {
{.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema transSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema configSchema[] = {
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "value", .bytes = TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableSchema connSchema[] = {
{.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "program", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ip:port", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema querySchema[] = {
{.name = "queryId", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "ip:port", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "qid", .bytes = 22 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "time", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "sql_obj_id", .bytes = QUERY_OBJ_ID_SIZE + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.name = "sub_queries", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "sub_query_info", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SInfosTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
{TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
{TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
{TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
{TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
{TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
{TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
{TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
{TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)},
{TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
{TSDB_INS_TABLE_TRANS, transSchema, tListLen(transSchema)},
{TSDB_INS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)},
{TSDB_INS_TABLE_CONNS, connSchema, tListLen(connSchema)},
{TSDB_INS_TABLE_QUERIES, querySchema, tListLen(querySchema)},
};
static int32_t mndInitInfosTableSchema(const SInfosTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema));
if (NULL == schema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -288,11 +43,15 @@ static int32_t mndInsInitMeta(SHashObj *hash) {
meta.sversion = 1;
meta.tversion = 1;
for (int32_t i = 0; i < tListLen(infosMeta); ++i) {
tstrncpy(meta.tbName, infosMeta[i].name, sizeof(meta.tbName));
meta.numOfColumns = infosMeta[i].colNum;
size_t size = 0;
const SSysTableMeta* pInfosTableMeta = NULL;
getInfosDbMeta(&pInfosTableMeta, &size);
if (mndInitInfosTableSchema(infosMeta[i].schema, infosMeta[i].colNum, &meta.pSchemas)) {
for (int32_t i = 0; i < size; ++i) {
tstrncpy(meta.tbName, pInfosTableMeta[i].name, sizeof(meta.tbName));
meta.numOfColumns = pInfosTableMeta[i].colNum;
if (mndInitInfosTableSchema(pInfosTableMeta[i].schema, pInfosTableMeta[i].colNum, &meta.pSchemas)) {
return -1;
}
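The hunk above drops the mnode's private schema arrays in favor of the shared system-table metadata declared in systable.h (the perf-schema file below gets the same treatment). A minimal sketch of how that shared metadata can be consumed, assuming the SSysTableMeta/SSysDbTableSchema fields used in this diff (name, colNum, schema, and per-column name/type/bytes); it would only compile inside the TDengine tree:

#include <stdio.h>
#include <string.h>
#include "systable.h"

// Print the column layout of one INFORMATION_SCHEMA table from the shared metadata.
static void dumpSysTable(const char *tbName) {
  size_t               num = 0;
  const SSysTableMeta *pMeta = NULL;
  getInfosDbMeta(&pMeta, &num);  // same call mndInsInitMeta now uses above

  for (size_t i = 0; i < num; ++i) {
    if (strcmp(pMeta[i].name, tbName) != 0) continue;
    for (int32_t c = 0; c < pMeta[i].colNum; ++c) {
      printf("%s.%s: type=%d bytes=%d\n", tbName, pMeta[i].schema[c].name, pMeta[i].schema[c].type,
             pMeta[i].schema[c].bytes);
    }
    return;
  }
  printf("%s is not an INFORMATION_SCHEMA table\n", tbName);
}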

View File

@ -619,14 +619,14 @@ static int32_t mndRetrieveMnodes(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->id, false);
char b1[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(b1, pObj->pDnode->ep, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(b1, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, b1, false);
const char *roles = syncStr(pObj->role);
char *b2 = taosMemoryCalloc(1, strlen(roles) + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
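Across this commit the retrieve callbacks stop reading display widths from the precomputed pShow->bytes[] array and take them from the column schema carried by the SHOW object's meta. A minimal sketch of the new pattern for one varchar cell; the value is illustrative, and the surrounding variables (pShow, pBlock, cols, numOfRows) are the ones used in the hunk above:

// Hypothetical varchar column: clamp the string to the byte width declared in the
// system-table schema, then append it to the result block.
const char *val = "example";                              // illustrative value
char        buf[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};  // large enough for this column

STR_WITH_MAXSIZE_TO_VARSTR(buf, val, pShow->pMeta->pSchemas[cols].bytes);

SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)buf, false);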

View File

@ -14,92 +14,11 @@
*/
#define _DEFAULT_SOURCE
#include "mndPerfSchema.h"
#include "mndInt.h"
//!!!! Note: only APPEND columns in below tables, NO insert !!!!
static const SPerfsTableSchema connectionsSchema[] = {
{.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "program", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SPerfsTableSchema queriesSchema[] = {
{.name = "query_id", .bytes = 4, .type = TSDB_DATA_TYPE_UBIGINT},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "fqdn", .bytes = TSDB_FQDN_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sub_queries", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "sub_query_info", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
};
static const SPerfsTableSchema topicSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
// TODO config
};
static const SPerfsTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "app_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "topics", .bytes = TSDB_SHOW_LIST_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SPerfsTableSchema subscriptionSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
};
static const SPerfsTableSchema offsetSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
};
static const SPerfsTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SPerfsTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
{TSDB_PERFS_TABLE_QUERIES, queriesSchema, tListLen(queriesSchema)},
{TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)},
{TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)},
{TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
};
#include "systable.h"
// connection/application/
int32_t mndInitPerfsTableSchema(const SPerfsTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
int32_t mndInitPerfsTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema));
if (NULL == schema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -126,11 +45,15 @@ int32_t mndPerfsInitMeta(SHashObj *hash) {
meta.sversion = 1;
meta.tversion = 1;
for (int32_t i = 0; i < tListLen(perfsMeta); ++i) {
strcpy(meta.tbName, perfsMeta[i].name);
meta.numOfColumns = perfsMeta[i].colNum;
size_t size = 0;
const SSysTableMeta* pSysDbTableMeta = NULL;
getPerfDbMeta(&pSysDbTableMeta, &size);
if (mndInitPerfsTableSchema(perfsMeta[i].schema, perfsMeta[i].colNum, &meta.pSchemas)) {
for (int32_t i = 0; i < size; ++i) {
strcpy(meta.tbName, pSysDbTableMeta[i].name);
meta.numOfColumns = pSysDbTableMeta[i].colNum;
if (mndInitPerfsTableSchema(pSysDbTableMeta[i].schema, pSysDbTableMeta[i].colNum, &meta.pSchemas)) {
return -1;
}
@ -166,7 +89,6 @@ int32_t mndBuildPerfsTableSchema(SMnode *pMnode, const char *dbFName, const char
}
memcpy(pRsp->pSchemas, meta->pSchemas, meta->numOfColumns * sizeof(SSchema));
return 0;
}

View File

@ -570,38 +570,39 @@ static int32_t mndRetrieveConns(SNodeMsg *pReq, SShowObj *pShow, char *data, int
if (pConn == NULL) break;
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
#if 0
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(uint32_t *)pWrite = pConn->id;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->user, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->user, pShow->pMeta->pSchemas[cols].bytes);
cols++;
// app name
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->app, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->app, pShow->pMeta->pSchemas[cols].bytes);
cols++;
// app pid
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int32_t *)pWrite = pConn->pid;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
taosIpPort2String(pConn->ip, pConn->port, ipStr);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int64_t *)pWrite = pConn->loginTimeMs;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
if (pConn->lastAccessTimeMs < pConn->loginTimeMs) pConn->lastAccessTimeMs = pConn->loginTimeMs;
*(int64_t *)pWrite = pConn->lastAccessTimeMs;
cols++;
#endif
numOfRows++;
}
@ -643,67 +644,67 @@ static int32_t mndRetrieveQueries(SNodeMsg *pReq, SShowObj *pShow, char *data, i
SQueryDesc *pDesc = pConn->pQueries + i;
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->queryId);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int64_t *)pWrite = htobe64(pConn->id);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->user, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConn->user, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
snprintf(str, tListLen(str), "%s:%u", taosIpStr(pConn->ip), pConn->port);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->pMeta->pSchemas[cols].bytes);
cols++;
char handleBuf[24] = {0};
snprintf(handleBuf, tListLen(handleBuf), "%" PRIu64, htobe64(pDesc->qId));
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, handleBuf, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, handleBuf, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->stime);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int64_t *)pWrite = htobe64(pDesc->useconds);
cols++;
snprintf(str, tListLen(str), "0x%" PRIx64, htobe64(pDesc->sqlObjId));
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int32_t *)pWrite = htonl(pDesc->pid);
cols++;
char epBuf[TSDB_EP_LEN + 1] = {0};
snprintf(epBuf, tListLen(epBuf), "%s:%u", pDesc->fqdn, pConn->port);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(bool *)pWrite = pDesc->stableQuery;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
*(int32_t *)pWrite = htonl(pDesc->numOfSub);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->subSqlInfo, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->subSqlInfo, pShow->pMeta->pSchemas[cols].bytes);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->bytes[cols]);
pWrite = data + pShow->offset[cols] * rows + pShow->pMeta->pSchemas[cols].bytes * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->pMeta->pSchemas[cols].bytes);
cols++;
numOfRows++;

View File

@ -517,7 +517,7 @@ static int32_t mndRetrieveQnodes(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->id, false);
char ep[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)ep, false);

View File

@ -20,7 +20,7 @@
int32_t mndProcessQueryMsg(SNodeMsg *pReq) {
SMnode *pMnode = pReq->pNode;
SReadHandle handle = {0};
SReadHandle handle = {.mnd = pMnode};
mTrace("msg:%p, in query queue is processing", pReq);
switch (pReq->rpcMsg.msgType) {

View File

@ -308,8 +308,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
// sink part
if (level == 0) {
// only for inplace
pTask->sinkType = TASK_SINK__SHOW;
pTask->showSink.reserved = 0;
pTask->sinkType = TASK_SINK__NONE;
if (!hasExtraSink) {
#if 1
if (pStream->createdBy == STREAM_CREATED_BY__SMA) {
@ -368,8 +367,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
pTask->sourceType = TASK_SOURCE__PIPE;
// sink part
pTask->sinkType = TASK_SINK__SHOW;
/*pTask->sinkType = TASK_SINK__NONE;*/
pTask->sinkType = TASK_SINK__NONE;
// dispatch part
ASSERT(hasExtraSink);
@ -456,7 +454,7 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
pTask->sourceType = TASK_SOURCE__MERGE;
// sink part
pTask->sinkType = TASK_SINK__SHOW;
pTask->sinkType = TASK_SINK__NONE;
// dispatch part
pTask->dispatchType = TASK_DISPATCH__NONE;

View File

@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "mndShow.h"
#include "systable.h"
#define SHOW_STEP_SIZE 100
@ -47,7 +48,7 @@ void mndCleanupShow(SMnode *pMnode) {
}
}
static int32_t convertToRetrieveType(char* name, int32_t len) {
static int32_t convertToRetrieveType(char *name, int32_t len) {
int32_t type = -1;
if (strncasecmp(name, TSDB_INS_TABLE_DNODES, len) == 0) {
@ -72,8 +73,6 @@ static int32_t convertToRetrieveType(char* name, int32_t len) {
// type = TSDB_MGMT_TABLE_INDEX;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_STABLES, len) == 0) {
type = TSDB_MGMT_TABLE_STB;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, len) == 0) {
type = TSDB_MGMT_TABLE_TABLE;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, len) == 0) {
@ -84,26 +83,28 @@ static int32_t convertToRetrieveType(char* name, int32_t len) {
type = TSDB_MGMT_TABLE_GRANTS;
} else if (strncasecmp(name, TSDB_INS_TABLE_VGROUPS, len) == 0) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_INS_TABLE_CONSUMERS, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
} else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIBES, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIBES, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIBES;
} else if (strncasecmp(name, TSDB_INS_TABLE_TRANS, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
} else if (strncasecmp(name, TSDB_INS_TABLE_SMAS, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_SMAS, len) == 0) {
type = TSDB_MGMT_TABLE_SMAS;
} else if (strncasecmp(name, TSDB_INS_TABLE_CONFIGS, len) == 0) {
type = TSDB_MGMT_TABLE_CONFIGS;
} else if (strncasecmp(name, TSDB_INS_TABLE_CONNS, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONNECTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_CONNS;
} else if (strncasecmp(name, TSDB_INS_TABLE_QUERIES, len) == 0) {
} else if (strncasecmp(name, TSDB_PERFS_TABLE_QUERIES, len) == 0) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else {
// ASSERT(0);
// ASSERT(0);
}
return type;
@ -115,12 +116,12 @@ static SShowObj *mndCreateShowObj(SMnode *pMnode, SRetrieveTableReq *pReq) {
int64_t showId = atomic_add_fetch_64(&pMgmt->showId, 1);
if (showId == 0) atomic_add_fetch_64(&pMgmt->showId, 1);
int32_t size = sizeof(SShowObj);
int32_t size = sizeof(SShowObj);
SShowObj showObj = {0};
showObj.id = showId;
showObj.id = showId;
showObj.pMnode = pMnode;
showObj.type = convertToRetrieveType(pReq->tb, tListLen(pReq->tb));
showObj.type = convertToRetrieveType(pReq->tb, tListLen(pReq->tb));
memcpy(showObj.db, pReq->db, TSDB_DB_FNAME_LEN);
int32_t keepTime = tsShellActivityTimer * 6 * 1000;
@ -208,16 +209,6 @@ static int32_t mndProcessRetrieveSysTableReq(SNodeMsg *pReq) {
pShow->pMeta = pMeta;
pShow->numOfColumns = pShow->pMeta->numOfColumns;
int32_t offset = 0;
for (int32_t i = 0; i < pShow->pMeta->numOfColumns; ++i) {
pShow->offset[i] = offset;
int32_t bytes = pShow->pMeta->pSchemas[i].bytes;
pShow->rowSize += bytes;
pShow->bytes[i] = bytes;
offset += bytes;
}
} else {
pShow = mndAcquireShowObj(pMnode, retrieveReq.showId);
if (pShow == NULL) {

View File

@ -463,7 +463,7 @@ static int32_t mndRetrieveSnodes(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->id, false);
char ep[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(ep, pObj->pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)ep, false);

View File

@ -308,6 +308,8 @@ static int32_t mndCreateStream(SMnode *pMnode, SNodeMsg *pReq, SCMCreateStreamRe
streamObj.smaId = 0;
/*streamObj.physicalPlan = "";*/
streamObj.logicalPlan = "not implemented";
streamObj.trigger = pCreate->triggerType;
streamObj.waterMark = pCreate->watermark;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_STREAM, &pReq->rpcMsg);
if (pTrans == NULL) {
@ -431,7 +433,7 @@ static int32_t mndRetrieveStream(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
SStreamObj *pStream = NULL;
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_TOPIC, pShow->pIter, (void **)&pStream);
pShow->pIter = sdbFetch(pSdb, SDB_STREAM, pShow->pIter, (void **)&pStream);
if (pShow->pIter == NULL) break;
SColumnInfoData *pColInfo;
@ -471,8 +473,13 @@ static int32_t mndRetrieveStream(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->trigger, false);
numOfRows++;
sdbRelease(pSdb, pStream);
}
return 0;
pShow->numOfRows += numOfRows;
return numOfRows;
}
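The fix above points the stream retrieve loop at SDB_STREAM instead of SDB_TOPIC and brings it in line with the convention the other retrieve callbacks follow: release every fetched object and return the number of rows appended in this batch. A consolidated sketch of the resulting loop shape, with the per-column appends elided (all names come from the hunk above):

int32_t     numOfRows = 0;
SStreamObj *pStream = NULL;

while (numOfRows < rows) {
  pShow->pIter = sdbFetch(pSdb, SDB_STREAM, pShow->pIter, (void **)&pStream);
  if (pShow->pIter == NULL) break;

  // ... append one cell per column with colDataAppend(), as in the hunk above ...

  numOfRows++;
  sdbRelease(pSdb, pStream);     // each fetched object must be released
}

pShow->numOfRows += numOfRows;   // running total kept on the SHOW object
return numOfRows;                // rows appended in this batch, not 0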
static void mndCancelGetNextStream(SMnode *pMnode, void *pIter) {

View File

@ -452,7 +452,10 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SNodeMsg *pMsg, const SMqRebO
}
// 4. TODO commit log: modification log
// 5. execution
// 5. set cb
mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0);
// 6. execution
if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL;
mndTransDrop(pTrans);
@ -518,9 +521,9 @@ static int32_t mndProcessRebalanceReq(SNodeMsg *pMsg) {
}
// reset flag
atomic_store_8(pReq->mqInReb, 0);
mInfo("mq rebalance completed successfully");
taosHashCleanup(pReq->rebSubHash);
mndRebEnd();
return 0;
}

View File

@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "mndTrans.h"
#include "mndAuth.h"
#include "mndConsumer.h"
#include "mndDb.h"
#include "mndShow.h"
#include "mndSync.h"
@ -442,6 +443,10 @@ static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) {
return mndTransTestStartFunc;
case TEST_TRANS_STOP_FUNC:
return mndTransTestStopFunc;
case MQ_REB_TRANS_START_FUNC:
return mndRebCntInc;
case MQ_REB_TRANS_STOP_FUNC:
return mndRebCntDec;
default:
return NULL;
}
@ -1346,17 +1351,17 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->createdTime, false);
char stage[TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(stage, mndTransStr(pTrans->stage), pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(stage, mndTransStr(pTrans->stage), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)stage, false);
char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pTrans->dbname), pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pTrans->dbname), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
char transType[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->transType), pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->transType), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)transType, false);
@ -1364,7 +1369,7 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false);
char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)lastError, false);

View File

@ -657,7 +657,7 @@ static int32_t mndRetrieveUsers(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols);
char name[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(name, pUser->user, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(name, pUser->user, pShow->pMeta->pSchemas[cols].bytes);
colDataAppend(pColInfo, numOfRows, (const char *)name, false);

View File

@ -540,7 +540,7 @@ static int32_t mndRetrieveVgroups(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock*
char buf1[20] = {0};
const char *role = syncStr(pVgroup->vnodeGid[i].role);
STR_WITH_MAXSIZE_TO_VARSTR(buf1, role, pShow->bytes[cols]);
STR_WITH_MAXSIZE_TO_VARSTR(buf1, role, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)buf1, false);

View File

@ -92,16 +92,16 @@ int metaTbCursorNext(SMTbCursor *pTbCur);
#endif
// tsdb
typedef struct STsdb STsdb;
// typedef struct STsdb STsdb;
typedef void *tsdbReaderT;
#define BLOCK_LOAD_OFFSET_SEQ_ORDER 1
#define BLOCK_LOAD_TABLE_SEQ_ORDER 2
#define BLOCK_LOAD_TABLE_RR_ORDER 3
tsdbReaderT *tsdbQueryTables(STsdb *tsdb, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
uint64_t taskId);
tsdbReaderT tsdbQueryCacheLast(STsdb *tsdb, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
void *pMemRef);
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
@ -116,7 +116,7 @@ SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumn
void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond);
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
int32_t tsdbGetOneTableGroup(void *pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo);
int32_t tsdbGetTableGroupFromIdList(STsdb *tsdb, SArray *pTableIdList, STableGroupInfo *pGroupInfo);
int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo);
// tq

View File

@ -87,14 +87,19 @@ int32_t metaCreateTSma(SMeta* pMeta, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
// tsdb
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb);
int tsdbClose(STsdb* pTsdb);
int tsdbBegin(STsdb* pTsdb);
int tsdbCommit(STsdb* pTsdb);
int32_t tsdbUpdateSmaWindow(STsdb* pTsdb, SSubmitReq* pMsg, int64_t version);
int32_t tsdbCreateTSma(STsdb* pTsdb, char* pMsg);
int32_t tsdbInsertTSmaData(STsdb* pTsdb, int64_t indexUid, const char* msg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb);
int tsdbClose(STsdb* pTsdb);
int tsdbBegin(STsdb* pTsdb);
int tsdbCommit(STsdb* pTsdb);
int32_t tsdbUpdateSmaWindow(STsdb* pTsdb, SSubmitReq* pMsg, int64_t version);
int32_t tsdbCreateTSma(STsdb* pTsdb, char* pMsg);
int32_t tsdbInsertTSmaData(STsdb* pTsdb, int64_t indexUid, const char* msg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
tsdbReaderT* tsdbQueryTablesT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
uint64_t taskId);
tsdbReaderT tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
void* pMemRef);
int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo);
// tq
STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal);

View File

@ -159,7 +159,7 @@ SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, boo
// decode
pBuf = pVal;
pSW = taosMemoryMalloc(sizeof(pSW));
pSW = taosMemoryMalloc(sizeof(SSchemaWrapper));
tCoderInit(&coder, TD_LITTLE_ENDIAN, pVal, vLen, TD_DECODER);
tDecodeSSchemaWrapper(&coder, pSW);
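The one-line change above fixes an undersized allocation: sizeof(pSW) is the size of a pointer, while the buffer must hold an SSchemaWrapper. A self-contained illustration of the pitfall in plain C (the struct here is a made-up stand-in):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int   nCols;
  void *pSchema;
  long  version;
} Wrapper;                                     /* stand-in for SSchemaWrapper */

int main(void) {
  Wrapper *pW = NULL;

  printf("sizeof(pW)  = %zu\n", sizeof(pW));   /* size of the pointer, e.g. 8  */
  printf("sizeof(*pW) = %zu\n", sizeof(*pW));  /* size of the struct, e.g. 24  */

  pW = malloc(sizeof(*pW));                    /* allocate the struct, not the pointer */
  if (pW != NULL) free(pW);
  return 0;
}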
@ -436,4 +436,4 @@ void *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid, bool isDecode) {
return NULL;
}
#endif
#endif

View File

@ -864,7 +864,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
}
int32_t tqProcessTaskDeploy(STQ* pTq, char* msg, int32_t msgLen) {
SStreamTask* pTask = taosMemoryMalloc(sizeof(SStreamTask));
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
return -1;
}

View File

@ -422,7 +422,7 @@ _end:
return NULL;
}
tsdbReaderT* tsdbQueryTables(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
tsdbReaderT* tsdbQueryTablesT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
uint64_t taskId) {
STsdbReadHandle* pTsdbReadHandle = tsdbQueryTablesImpl(tsdb, pCond, qId, taskId);
if (pTsdbReadHandle == NULL) {
@ -535,7 +535,7 @@ tsdbReaderT tsdbQueryLastRow(STsdb* tsdb, SQueryTableDataCond* pCond, STableGrou
return NULL;
}
STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(tsdb, pCond, groupList, qId, taskId);
STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTablesT(tsdb, pCond, groupList, qId, taskId);
if (pTsdbReadHandle == NULL) {
return NULL;
}
@ -555,8 +555,8 @@ tsdbReaderT tsdbQueryLastRow(STsdb* tsdb, SQueryTableDataCond* pCond, STableGrou
}
#if 0
tsdbReaderT tsdbQueryCacheLast(STsdb *tsdb, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId, STsdbMemTable* pMemRef) {
STsdbReadHandle *pTsdbReadHandle = (STsdbReadHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
tsdbReaderT tsdbQueryCacheLastT(STsdb *tsdb, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId, STsdbMemTable* pMemRef) {
STsdbReadHandle *pTsdbReadHandle = (STsdbReadHandle*) tsdbQueryTablesT(tsdb, pCond, groupList, qId, pMemRef);
if (pTsdbReadHandle == NULL) {
return NULL;
}
@ -634,7 +634,7 @@ tsdbReaderT tsdbQueryRowsInExternalWindow(STsdb* tsdb, SQueryTableDataCond* pCon
}
}
STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTables(tsdb, pCond, pNew, qId, taskId);
STsdbReadHandle* pTsdbReadHandle = (STsdbReadHandle*)tsdbQueryTablesT(tsdb, pCond, pNew, qId, taskId);
pTsdbReadHandle->loadExternalRow = true;
pTsdbReadHandle->currentLoadExternalRows = true;
@ -3717,7 +3717,7 @@ _error:
}
#if 0
int32_t tsdbGetTableGroupFromIdList(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
if (tsdbRLockRepoMeta(tsdb) < 0) {
return terrno;
}

View File

@ -147,4 +147,24 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) {
if (vgId) {
*vgId = TD_VID(pVnode);
}
}
// wrapper of tsdb read interface
// TODO: use FORCE_INLINE if possible later
tsdbReaderT *tsdbQueryTables(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId,
uint64_t taskId) {
return tsdbQueryTablesT(pVnode->pTsdb, pCond, tableInfoGroup, qId, taskId);
}
tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableGroupInfo *groupList, uint64_t qId,
void *pMemRef) {
#if 0
return tsdbQueryCacheLastT(pVnode->pTsdb, pCond, groupList, qId, pMemRef);
#endif
return 0;
}
int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo) {
#if 0
return tsdbGetTableGroupFromIdListT(pVnode->pTsdb, pTableIdList, pGroupInfo);
#endif
return 0;
}
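The wrappers above move the tsdb read entry points behind the vnode, so query code passes an SVnode* and the vnode dereferences its own pTsdb. A minimal sketch of a call site under that assumption; cond and tableGroup are placeholders the caller is assumed to have prepared, and the signature is the one declared in this diff:

// Hypothetical call site using the vnode-level wrapper added above.
uint64_t queryId = 1;
uint64_t taskId  = 1;

tsdbReaderT *pReader = tsdbQueryTables(pVnode, &cond, &tableGroup, queryId, taskId);
if (pReader == NULL) {
  // handle failure; terrno carries the error code in this code base
}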

View File

@ -141,8 +141,10 @@ _err:
int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
#if 0
SReadHandle handle = {.reader = pVnode->pTsdb, .meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode};
#endif
SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);

View File

@ -17,6 +17,7 @@
#include "query.h"
#include "tname.h"
#include "catalogInt.h"
#include "systable.h"
int32_t ctgActUpdateVg(SCtgMetaAction *action);
int32_t ctgActUpdateTbl(SCtgMetaAction *action);

View File

@ -640,6 +640,7 @@ int32_t getTableScanOrder(SOperatorInfo* pOperator);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
SOperatorInfo* createExchangeOperatorInfo(const SNodeList* pSources, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
@ -694,7 +695,7 @@ SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(STaskRuntimeEnv* pRuntim
SExprInfo* pExpr, int32_t numOfOutput);
#endif
void projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t numOfOutput, SArray* pPseudoList);
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, bool createDummyCol);

View File

@ -31,6 +31,7 @@ typedef struct SDataDispatchBuf {
typedef struct SDataCacheEntry {
int32_t dataLen;
int32_t numOfRows;
int32_t numOfCols;
int8_t compressed;
char data[];
} SDataCacheEntry;
@ -76,6 +77,7 @@ static void toDataCacheEntry(const SDataDispatchHandle* pHandle, const SInputDat
SDataCacheEntry* pEntry = (SDataCacheEntry*)pBuf->pData;
pEntry->compressed = (int8_t)needCompress(pInput->pData, numOfCols);
pEntry->numOfRows = pInput->pData->info.rows;
pEntry->numOfCols = pInput->pData->info.numOfCols;
pEntry->dataLen = 0;
pBuf->useSize = sizeof(SRetrieveTableRsp);
@ -169,6 +171,7 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
SDataCacheEntry* pEntry = (SDataCacheEntry*)(pDispatcher->nextOutput.pData);
memcpy(pOutput->pData, pEntry->data, pEntry->dataLen);
pOutput->numOfRows = pEntry->numOfRows;
pOutput->numOfCols = pEntry->numOfCols;
pOutput->compressed = pEntry->compressed;
taosMemoryFreeClear(pDispatcher->nextOutput.pData); // todo persistent
pOutput->bufStatus = updateStatus(pDispatcher);
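With the new numOfCols field, the data-dispatch sink carries the block's column count alongside its row count, so the consumer of SOutputData no longer has to infer it from the operator. A sketch of the round trip, using only the fields touched in this hunk:

// Producer (toDataCacheEntry): record the block geometry in the cache entry.
pEntry->numOfRows = pInput->pData->info.rows;
pEntry->numOfCols = pInput->pData->info.numOfCols;   // field added by this commit

// Consumer (getDataBlock): hand both counts back to the caller.
pOutput->numOfRows  = pEntry->numOfRows;
pOutput->numOfCols  = pEntry->numOfCols;             // now available downstream
pOutput->compressed = pEntry->compressed;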

View File

@ -227,8 +227,8 @@ int32_t operatorDummyOpenFn(SOperatorInfo* pOperator) {
}
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn,
__optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
__optr_decode_fn_t decode, __optr_get_explain_fn_t explain) {
__optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
__optr_decode_fn_t decode, __optr_get_explain_fn_t explain) {
SOperatorFpSet fpSet = {
._openFn = openFn,
.getNextFn = nextFn,
@ -441,8 +441,8 @@ SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId,
* +----------+---------------+
*/
static SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, int64_t uid,
char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup) {
char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
@ -463,9 +463,9 @@ static SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowI
}
}
// 1. close current opened time window
// 1. close current opened time window
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId &&
pResult->offset != pResultRowInfo->cur.offset))) {
pResult->offset != pResultRowInfo->cur.offset))) {
// todo extract function
SResultRowPosition pos = pResultRowInfo->cur;
SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
@ -482,7 +482,8 @@ static SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowI
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos, sizeof(SResultRowPosition));
taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@ -646,7 +647,7 @@ static int32_t setResultOutputBufByKey_rv(SResultRowInfo* pResultRowInfo, int64_
SExecTaskInfo* pTaskInfo) {
assert(win->skey <= win->ekey);
SResultRow* pResultRow = doSetResultOutBufByKey(pAggSup->pResultBuf, pResultRowInfo, id, (char*)&win->skey,
TSDB_KEYSIZE, masterscan, tableGroupId, pTaskInfo, true, pAggSup);
TSDB_KEYSIZE, masterscan, tableGroupId, pTaskInfo, true, pAggSup);
if (pResultRow == NULL) {
*pResult = NULL;
@ -1034,8 +1035,8 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo
}
}
static int32_t doCreateConstantValColumnInfo(SInputColumnInfoData* pInput, SFunctParam* pFuncParam,
int32_t paramIndex, int32_t numOfRows) {
static int32_t doCreateConstantValColumnInfo(SInputColumnInfoData* pInput, SFunctParam* pFuncParam, int32_t paramIndex,
int32_t numOfRows) {
SColumnInfoData* pColInfo = NULL;
if (pInput->pData[paramIndex] == NULL) {
pColInfo = taosMemoryCalloc(1, sizeof(SColumnInfoData));
@ -1066,9 +1067,9 @@ static int32_t doCreateConstantValColumnInfo(SInputColumnInfoData* pInput, SFunc
colDataAppendDouble(pColInfo, i, &v);
}
} else if (type == TSDB_DATA_TYPE_VARCHAR) {
char *tmp = taosMemoryMalloc(pFuncParam->param.nLen + VARSTR_HEADER_SIZE);
char* tmp = taosMemoryMalloc(pFuncParam->param.nLen + VARSTR_HEADER_SIZE);
STR_WITH_SIZE_TO_VARSTR(tmp, pFuncParam->param.pz, pFuncParam->param.nLen);
for(int32_t i = 0; i < numOfRows; ++i) {
for (int32_t i = 0; i < numOfRows; ++i) {
colDataAppend(pColInfo, i, tmp, false);
}
}
@ -1081,9 +1082,9 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
int32_t code = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pOperator->numOfOutput; ++i) {
pCtx[i].order = order;
pCtx[i].size = pBlock->info.rows;
pCtx[i].pSrcBlock = pBlock;
pCtx[i].order = order;
pCtx[i].size = pBlock->info.rows;
pCtx[i].pSrcBlock = pBlock;
pCtx[i].currentStage = MAIN_SCAN;
SInputColumnInfoData* pInput = &pCtx[i].input;
@ -1177,7 +1178,7 @@ static void setPseudoOutputColInfo(SSDataBlock* pResult, SqlFunctionCtx* pCtx, S
}
}
void projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t numOfOutput, SArray* pPseudoList) {
setPseudoOutputColInfo(pResult, pCtx, pPseudoList);
pResult->info.groupId = pSrcBlock->info.groupId;
@ -1217,7 +1218,7 @@ void projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock*
taosArrayPush(pBlockList, &pSrcBlock);
SColumnInfoData* pResColData = taosArrayGet(pResult->pDataBlock, outputSlotId);
SColumnInfoData idata = {.info = pResColData->info};
SColumnInfoData idata = {.info = pResColData->info, .hasNull = true};
SScalarParam dest = {.columnData = &idata};
scalarCalculate(pExpr[k].pExpr->_optrRoot.pRootNode, pBlockList, &dest);
@ -1254,10 +1255,14 @@ void projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock*
taosArrayPush(pBlockList, &pSrcBlock);
SColumnInfoData* pResColData = taosArrayGet(pResult->pDataBlock, outputSlotId);
SColumnInfoData idata = {.info = pResColData->info};
SColumnInfoData idata = {.info = pResColData->info, .hasNull = true};
SScalarParam dest = {.columnData = &idata};
scalarCalculate((SNode*)pExpr[k].pExpr->_function.pFunctNode, pBlockList, &dest);
int32_t code = scalarCalculate((SNode*)pExpr[k].pExpr->_function.pFunctNode, pBlockList, &dest);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pBlockList);
return code;
}
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
colDataMergeCol(pResColData, startOffset, &idata, dest.numOfRows);
@ -1273,6 +1278,8 @@ void projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock*
if (!createNewColModel) {
pResult->info.rows += numOfRows;
}
return TSDB_CODE_SUCCESS;
}
void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SArray* pDataBlock, TSKEY prevTs,
@ -1421,7 +1428,7 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc
if (!done) { // it is not interpolated, now start to generated the interpolated value
int32_t startRowIndex = startPos;
bool interp = setTimeWindowInterpolationStartTs(pOperatorInfo, pCtx, startRowIndex, pBlock->info.rows,
pBlock->pDataBlock, tsCols, win);
pBlock->pDataBlock, tsCols, win);
if (interp) {
setResultRowInterpo(pResult, RESULT_ROW_START_INTERP);
}
@ -1482,8 +1489,8 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL;
int32_t ret = setResultOutputBufByKey_rv(pResultRowInfo, pSDataBlock->info.uid, &win, masterScan, &pResult,
tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset,
&pInfo->aggSup, pTaskInfo);
tableGroupId, pInfo->binfo.pCtx, numOfOutput, pInfo->binfo.rowCellInfoOffset,
&pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
@ -1491,8 +1498,8 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
pos->groupId = tableGroupId;
pos->pos = (SResultRowPosition) {.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*) pos->key = pResult->win.skey;
pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*)pos->key = pResult->win.skey;
taosArrayPush(pUpdated, &pos);
}
@ -1569,8 +1576,8 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
pos->groupId = tableGroupId;
pos->pos = (SResultRowPosition) {.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*) pos->key = pResult->win.skey;
pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
*(int64_t*)pos->key = pResult->win.skey;
taosArrayPush(pUpdated, &pos);
}
@ -1706,7 +1713,7 @@ int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char*
SqlFunctionCtx* pCtx = binfo->pCtx;
SResultRow* pResultRow = doSetResultOutBufByKey(pBuf, pResultRowInfo, groupId, (char*)pData, bytes, true, groupId,
pTaskInfo, false, pAggSup);
pTaskInfo, false, pAggSup);
assert(pResultRow != NULL);
setResultRowKey(pResultRow, pData, type);
@ -1890,7 +1897,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->functionId = -1;
pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
SFuncExecEnv env = {0};
@ -1901,7 +1908,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
if (!isUdaf) {
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char *udfName = pExpr->pExpr->_function.pFunctNode->functionName;
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
strncpy(pCtx->udfName, udfName, strlen(udfName));
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
@ -1926,9 +1933,9 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->pTsOutput = NULL;
pCtx->resDataInfo.bytes = pFunct->resSchema.bytes;
pCtx->resDataInfo.type = pFunct->resSchema.type;
pCtx->order = TSDB_ORDER_ASC;
pCtx->order = TSDB_ORDER_ASC;
pCtx->start.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->numOfParams = pExpr->base.numOfParams;
pCtx->param = pFunct->pParam;
@ -2719,7 +2726,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
int64_t tid = 0;
int64_t groupId = 0;
SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, tid, (char*)&tid, sizeof(tid), true,
groupId, pTaskInfo, false, pSup);
groupId, pTaskInfo, false, pSup);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset);
@ -2866,24 +2873,24 @@ void finalizeUpdatedResult(SqlFunctionCtx* pCtx, int32_t numOfOutput, SDiskbased
size_t num = taosArrayGetSize(pUpdateList);
for (int32_t i = 0; i < num; ++i) {
SResKeyPos * pPos = taosArrayGetP(pUpdateList, i);
SResKeyPos* pPos = taosArrayGetP(pUpdateList, i);
SFilePage* bufPage = getBufPage(pBuf, pPos->pos.pageId);
SResultRow* pRow = (SResultRow*)((char*)bufPage + pPos->pos.offset);
//
//
for (int32_t j = 0; j < numOfOutput; ++j) {
pCtx[j].resultInfo = getResultCell(pRow, j, rowCellInfoOffset);
//
//
struct SResultRowEntryInfo* pResInfo = pCtx[j].resultInfo;
// if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
// continue;
// }
//
// if (pCtx[j].fpSet.process) { // TODO set the dummy function.
//// pCtx[j].fpSet.finalize(&pCtx[j]);
// pResInfo->initialized = true;
// }
//
// if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
// continue;
// }
//
// if (pCtx[j].fpSet.process) { // TODO set the dummy function.
//// pCtx[j].fpSet.finalize(&pCtx[j]);
// pResInfo->initialized = true;
// }
//
if (pRow->numOfRows < pResInfo->numOfRes) {
pRow->numOfRows = pResInfo->numOfRes;
}
@ -3005,9 +3012,8 @@ void doSetTableGroupOutputBuf(SAggOperatorInfo* pAggInfo, int32_t numOfOutput, u
SqlFunctionCtx* pCtx = pAggInfo->binfo.pCtx;
int32_t* rowCellInfoOffset = pAggInfo->binfo.rowCellInfoOffset;
SResultRow* pResultRow =
doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, uid, (char*)&groupId, sizeof(groupId),
true, groupId, pTaskInfo, false, &pAggInfo->aggSup);
SResultRow* pResultRow = doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, uid, (char*)&groupId,
sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup);
assert(pResultRow != NULL);
/*
@ -3105,8 +3111,8 @@ int32_t doCopyToSDataBlock(SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbased
}
for (int32_t i = start; (i < numOfRows) && (i >= 0); i += step) {
SResKeyPos *pPos = taosArrayGetP(pGroupResInfo->pRows, i);
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
if (pRow->numOfRows == 0) {
@ -3668,6 +3674,7 @@ int32_t loadRemoteDataCallback(void* param, const SDataBuf* pMsg, int32_t code)
SRetrieveTableRsp* pRsp = pSourceDataInfo->pRsp;
pRsp->numOfRows = htonl(pRsp->numOfRows);
pRsp->compLen = htonl(pRsp->compLen);
pRsp->numOfCols = htonl(pRsp->numOfCols);
pRsp->useconds = htobe64(pRsp->useconds);
} else {
pSourceDataInfo->code = code;
@ -3746,12 +3753,12 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInf
}
// NOTE: sources columns are more than the destination SSDatablock columns.
static void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
size_t numOfSrcCols = taosArrayGetSize(pCols);
ASSERT(numOfSrcCols >= pBlock->info.numOfCols);
int32_t i = 0, j = 0;
while(i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
SColumnInfoData* p = taosArrayGet(pCols, i);
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, j);
if (!pmInfo->output) {
@ -3777,10 +3784,10 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
blockDataEnsureCapacity(pRes, numOfRows);
if (pColList == NULL) { // data from other sources
int32_t dataLen = *(int32_t*) pData;
int32_t dataLen = *(int32_t*)pData;
pData += sizeof(int32_t);
pRes->info.groupId = *(uint64_t*) pData;
pRes->info.groupId = *(uint64_t*)pData;
pData += sizeof(uint64_t);
int32_t* colLen = (int32_t*)pData;
@ -3810,8 +3817,8 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
memcpy(pColInfoData->pData, pStart, colLen[i]);
}
//TODO setting this flag to true temporarily so aggregate function on stable will
//examine NULL value for non-primary key column
// TODO setting this flag to true temporarily so aggregate function on stable will
// examine NULL value for non-primary key column
pColInfoData->hasNull = true;
pStart += colLen[i];
}
@ -3847,11 +3854,11 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
blockDataEnsureCapacity(&block, numOfRows);
int32_t dataLen = *(int32_t*) pStart;
uint64_t groupId = *(uint64_t*) (pStart + sizeof(int32_t));
int32_t dataLen = *(int32_t*)pStart;
uint64_t groupId = *(uint64_t*)(pStart + sizeof(int32_t));
pStart += sizeof(int32_t) + sizeof(uint64_t);
int32_t* colLen = (int32_t*) (pStart);
int32_t* colLen = (int32_t*)(pStart);
pStart += sizeof(int32_t) * numOfCols;
for (int32_t i = 0; i < numOfCols; ++i) {
@ -3877,7 +3884,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
}
// data from mnode
relocateColumnData(pRes, pColList, block.pDataBlock);
relocateColumnData(pRes, pColList, block.pDataBlock);
}
pRes->info.rows = numOfRows;
@ -3951,7 +3958,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
code =
setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pOperator->numOfOutput, startTs, &pDataInfo->totalRows, NULL);
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (code != 0) {
goto _error;
}
@ -4086,7 +4093,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
int32_t code =
setSDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
pTableRsp->compLen, pOperator->numOfOutput, startTs, &pDataInfo->totalRows, NULL);
pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " numOfRows:%d, rowsOfSource:%" PRIu64
@ -4226,8 +4233,8 @@ SOperatorInfo* createExchangeOperatorInfo(const SNodeList* pSources, SSDataBlock
pOperator->numOfOutput = pBlock->info.numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, destroyExchangeOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
destroyExchangeOperatorInfo, NULL, NULL, NULL);
#if 1
{ // todo refactor
@ -4716,8 +4723,8 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSort, NULL, NULL, destroyOrderOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, NULL);
int32_t code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@ -4771,8 +4778,12 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
// there is a scalar expression that needs to be calculated before applying the group aggregation.
if (pAggInfo->pScalarExprInfo != NULL) {
projectApplyFunctions(pAggInfo->pScalarExprInfo, pBlock, pBlock, pAggInfo->pScalarCtx, pAggInfo->numOfScalarExpr,
int32_t code = projectApplyFunctions(pAggInfo->pScalarExprInfo, pBlock, pBlock, pAggInfo->pScalarCtx, pAggInfo->numOfScalarExpr,
NULL);
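// abort the task when the scalar projection fails instead of aggregating over an invalid block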
if (code != TSDB_CODE_SUCCESS) {
pTaskInfo->code = code;
longjmp(pTaskInfo->env, pTaskInfo->code);
}
}
// the pDataBlock are always the same one, no need to call this again
@ -4933,8 +4944,8 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
initResultRow(resultRow);
prepareResultListBuffer(&pInfo->resultRowInfo, pOperator->pTaskInfo->env);
// pInfo->resultRowInfo.cur = pInfo->resultRowInfo.size;
// pInfo->resultRowInfo.pPosition[pInfo->resultRowInfo.size++] =
// (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
// pInfo->resultRowInfo.pPosition[pInfo->resultRowInfo.size++] =
// (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
}
@ -4946,13 +4957,13 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
enum {
PROJECT_RETRIEVE_CONTINUE = 0x1,
PROJECT_RETRIEVE_DONE = 0x2,
PROJECT_RETRIEVE_DONE = 0x2,
};
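// apply the slimit/soffset and limit/offset bookkeeping of the project operator to the current result block,
// and use the PROJECT_RETRIEVE_* codes above to tell the caller whether to keep accumulating rows or to
// return the block built so far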
static int32_t handleLimitOffset(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
SSDataBlock* pRes = pInfo->pRes;
SSDataBlock* pRes = pInfo->pRes;
if (pProjectInfo->curSOffset > 0) {
if (pProjectInfo->groupId == 0) { // it is the first group
@ -5015,9 +5026,10 @@ static int32_t handleLimitOffset(SOperatorInfo* pOperator, SSDataBlock* pBlock)
// todo optimize performance
// If slimit/soffset values exist, multi-round results cannot be packed into one group, since they may not
// belong to the same group; the limit/offset value is not valid in this case.
if (pRes->info.rows >= pOperator->resultInfo.threshold || pProjectInfo->slimit.offset != -1 || pProjectInfo->slimit.limit != -1) {
if (pRes->info.rows >= pOperator->resultInfo.threshold || pProjectInfo->slimit.offset != -1 ||
pProjectInfo->slimit.limit != -1) {
return PROJECT_RETRIEVE_DONE;
} else { // not full enough, continue to accumulate the output data in the buffer.
} else { // not full enough, continue to accumulate the output data in the buffer.
return PROJECT_RETRIEVE_CONTINUE;
}
}
@ -5101,7 +5113,7 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator, bool* newgroup)
int32_t status = handleLimitOffset(pOperator, pBlock);
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
} else if (status == PROJECT_RETRIEVE_DONE) {
break;
}
}
@ -5291,7 +5303,7 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator, bool* newgroup) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
// finalizeQueryResult(pSliceInfo->binfo.pCtx, pOperator->numOfOutput);
// initGroupedResultInfo(&pSliceInfo->groupResInfo, &pSliceInfo->binfo.resultRowInfo);
// initGroupedResultInfo(&pSliceInfo->groupResInfo, &pSliceInfo->binfo.resultRowInfo);
// doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pSliceInfo->pRes);
if (pSliceInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pSliceInfo->groupResInfo)) {
@ -5342,7 +5354,7 @@ static SSDataBlock* doSTableIntervalAgg(SOperatorInfo* pOperator, bool* newgroup
finalizeMultiTupleQueryResult(pInfo->binfo.pCtx, pOperator->numOfOutput, pInfo->aggSup.pResultBuf,
&pInfo->binfo.resultRowInfo, pInfo->binfo.rowCellInfoOffset);
// initGroupedResultInfo(&pInfo->groupResInfo, &pInfo->binfo.resultRowInfo);
// initGroupedResultInfo(&pInfo->groupResInfo, &pInfo->binfo.resultRowInfo);
OPTR_SET_OPENED(pOperator);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
@ -5684,8 +5696,8 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK);
// pAggSup->pResultRowListSet = taosHashInit(100, hashFn, false, HASH_NO_LOCK);
// pAggSup->pResultRowArrayList = taosArrayInit(10, sizeof(SResultRowCell));
// pAggSup->pResultRowListSet = taosHashInit(100, hashFn, false, HASH_NO_LOCK);
// pAggSup->pResultRowArrayList = taosArrayInit(10, sizeof(SResultRowCell));
if (pAggSup->keyBuf == NULL /*|| pAggSup->pResultRowArrayList == NULL || pAggSup->pResultRowListSet == NULL*/ ||
pAggSup->pResultRowHashTable == NULL) {
@ -5703,8 +5715,8 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
static void cleanupAggSup(SAggSupporter* pAggSup) {
taosMemoryFreeClear(pAggSup->keyBuf);
taosHashCleanup(pAggSup->pResultRowHashTable);
// taosHashCleanup(pAggSup->pResultRowListSet);
// taosArrayDestroy(pAggSup->pResultRowArrayList);
// taosHashCleanup(pAggSup->pResultRowListSet);
// taosArrayDestroy(pAggSup->pResultRowArrayList);
destroyDiskbasedBuf(pAggSup->pResultBuf);
}
@ -5715,7 +5727,7 @@ int32_t initAggInfo(SOptrBasicInfo* pBasicInfo, SAggSupporter* pAggSup, SExprInf
doInitAggInfoSup(pAggSup, pBasicInfo->pCtx, numOfCols, keyBufSize, pkey);
for(int32_t i = 0; i < numOfCols; ++i) {
for (int32_t i = 0; i < numOfCols; ++i) {
pBasicInfo->pCtx[i].pBuf = pAggSup->pResultBuf;
}
@ -5732,6 +5744,10 @@ void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
}
static STableQueryInfo* initTableQueryInfo(const STableGroupInfo* pTableGroupInfo) {
if (pTableGroupInfo->numOfTables == 0) {
return NULL;
}
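// an empty table group is not treated as an error: createAggregateOperatorInfo below accepts a NULL
// pTableQueryInfo and no longer fails in that case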
STableQueryInfo* pTableQueryInfo = taosMemoryCalloc(pTableGroupInfo->numOfTables, sizeof(STableQueryInfo));
if (pTableQueryInfo == NULL) {
return NULL;
@ -5757,7 +5773,8 @@ static STableQueryInfo* initTableQueryInfo(const STableGroupInfo* pTableGroupInf
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo) {
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
const STableGroupInfo* pTableGroupInfo) {
SAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -5771,7 +5788,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
int32_t code =
initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExprInfo, numOfCols, pResultBlock, keyBufSize, pTaskInfo->id.str);
pInfo->pTableQueryInfo = initTableQueryInfo(pTableGroupInfo);
if (code != TSDB_CODE_SUCCESS || pInfo->pTableQueryInfo == NULL) {
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -5926,8 +5943,8 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
pOperator->pExpr = pExprInfo;
pOperator->numOfOutput = num;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL, destroyProjectOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL,
destroyProjectOperatorInfo, NULL, NULL, NULL);
pOperator->pTaskInfo = pTaskInfo;
int32_t code = appendDownstream(pOperator, &downstream, 1);
@ -5952,12 +5969,12 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
// pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
pInfo->execModel = pTaskInfo->execModel;
pInfo->win = pTaskInfo->window;
pInfo->twAggSup = *pTwAggSupp;
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
// pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
pInfo->execModel = pTaskInfo->execModel;
pInfo->win = pTaskInfo->window;
pInfo->twAggSup = *pTwAggSupp;
pInfo->primaryTsIndex = primaryTsSlotId;
int32_t numOfRows = 4096;
@ -5984,8 +6001,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pOperator->numOfOutput = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL, destroyIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -6004,18 +6021,19 @@ _error:
SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
STimeWindowAggSupp *pTwAggSupp, const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo) {
STimeWindowAggSupp* pTwAggSupp, const STableGroupInfo* pTableGroupInfo,
SExecTaskInfo* pTaskInfo) {
STableIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(STableIntervalOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
pInfo->win = pTaskInfo->window;
pInfo->twAggSup = *pTwAggSupp;
pInfo->order = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
pInfo->win = pTaskInfo->window;
pInfo->twAggSup = *pTwAggSupp;
pInfo->primaryTsIndex = primaryTsSlotId;
int32_t numOfRows = 4096;
@ -6042,8 +6060,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
pOperator->numOfOutput = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doStreamIntervalAgg, doStreamIntervalAgg, NULL, destroyIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doStreamIntervalAgg, doStreamIntervalAgg, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@ -6052,13 +6070,12 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
return pOperator;
_error:
_error:
destroyIntervalOperatorInfo(pInfo, numOfCols);
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
@ -6122,8 +6139,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL,
destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
int32_t code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@ -6168,8 +6185,8 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo
pOperator->numOfOutput = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL, destroySWindowOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL,
destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->pTaskInfo = pTaskInfo;
code = appendDownstream(pOperator, &downstream, 1);
@ -6257,8 +6274,8 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExp
pOperator->numOfOutput = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroySFillOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroySFillOperatorInfo, NULL, NULL, NULL);
pOperator->pTaskInfo = pTaskInfo;
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@ -6269,7 +6286,6 @@ _error:
return NULL;
}
static int32_t getColumnIndexInSource(SQueriedTableInfo* pTableInfo, SExprBasicInfo* pExpr, SColumnInfo* pTagCols) {
int32_t j = 0;
@ -6467,11 +6483,11 @@ static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableSc
static SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SInterval interval = {
.interval = pTableScanNode->interval,
.sliding = pTableScanNode->sliding,
.interval = pTableScanNode->interval,
.sliding = pTableScanNode->sliding,
.intervalUnit = pTableScanNode->intervalUnit,
.slidingUnit = pTableScanNode->slidingUnit,
.offset = pTableScanNode->offset,
.slidingUnit = pTableScanNode->slidingUnit,
.offset = pTableScanNode->offset,
};
return interval;
@ -6486,7 +6502,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode;
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
int32_t numOfCols = 0;
int32_t numOfCols = 0;
tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId);
if (pDataReader == NULL && terrno != 0) {
return NULL;
@ -6497,14 +6513,15 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SSDataBlock* pResBlock = createResDataBlock(pScanPhyNode->node.pOutputDataBlockDesc);
SQueryTableDataCond cond = {0};
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
SInterval interval = extractIntervalInfo(pTableScanNode);
return createTableScanOperatorInfo(pDataReader, &cond, numOfCols, pTableScanNode->dataRequired, pTableScanNode->scanSeq, pColList,
pResBlock, pScanPhyNode->node.pConditions, &interval, pTableScanNode->ratio, pTaskInfo);
return createTableScanOperatorInfo(pDataReader, &cond, numOfCols, pTableScanNode->dataRequired,
pTableScanNode->scanSeq, pColList, pResBlock, pScanPhyNode->node.pConditions,
&interval, pTableScanNode->ratio, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pExchange->node.pOutputDataBlockDesc);
@ -6512,27 +6529,30 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table.
int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo, queryId, taskId);
int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo,
queryId, taskId);
SArray* tableIdList = extractTableIdList(pTableGroupInfo);
SSDataBlock* pResBlock = createResDataBlock(pScanPhyNode->node.pOutputDataBlockDesc);
int32_t numOfCols = 0;
SArray* pCols = extractColMatchInfo(pScanPhyNode->pScanCols, pScanPhyNode->node.pOutputDataBlockDesc, &numOfCols);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle->reader, pResBlock, pCols, tableIdList, pTaskInfo);
SOperatorInfo* pOperator =
createStreamScanOperatorInfo(pHandle->reader, pResBlock, pCols, tableIdList, pTaskInfo);
taosArrayDestroy(tableIdList);
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
SSystemTableScanPhysiNode* pSysScanPhyNode = (SSystemTableScanPhysiNode*)pPhyNode;
SScanPhysiNode* pScanNode = &pSysScanPhyNode->scan;
SScanPhysiNode* pScanNode = &pSysScanPhyNode->scan;
SSDataBlock* pResBlock = createResDataBlock(pScanNode->node.pOutputDataBlockDesc);
int32_t numOfOutputCols = 0;
SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfOutputCols);
SArray* colList =
extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfOutputCols);
SOperatorInfo* pOperator = createSysTableScanOperatorInfo(
pHandle, pResBlock, &pScanNode->tableName, pScanNode->node.pConditions, pSysScanPhyNode->mgmtEpSet,
colList, pTaskInfo, pSysScanPhyNode->showRewrite, pSysScanPhyNode->accountId);
pHandle, pResBlock, &pScanNode->tableName, pScanNode->node.pConditions, pSysScanPhyNode->mgmtEpSet, colList,
pTaskInfo, pSysScanPhyNode->showRewrite, pSysScanPhyNode->accountId);
return pOperator;
} else {
ASSERT(0);
@ -6649,8 +6669,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return pOptr;
}
static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
pCond->loadExternalRows = false;
pCond->order = pTableScanNode->scanSeq[0] > 0 ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
@ -6664,7 +6683,7 @@ static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableS
pCond->twindow = pTableScanNode->scanRange;
#if 1
//todo work around a problem, remove it later
// todo work around a problem, remove it later
if ((pCond->order == TSDB_ORDER_ASC && pCond->twindow.skey > pCond->twindow.ekey) ||
(pCond->order == TSDB_ORDER_DESC && pCond->twindow.skey < pCond->twindow.ekey)) {
TSWAP(pCond->twindow.skey, pCond->twindow.ekey);
@ -6708,22 +6727,22 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
// todo extract method
SColumn c = {0};
c.slotId = pColNode->slotId;
c.colId = pColNode->colId;
c.type = pColNode->node.resType.type;
c.bytes = pColNode->node.resType.bytes;
c.scale = pColNode->node.resType.scale;
c.slotId = pColNode->slotId;
c.colId = pColNode->colId;
c.type = pColNode->node.resType.type;
c.bytes = pColNode->node.resType.bytes;
c.scale = pColNode->node.resType.scale;
c.precision = pColNode->node.resType.precision;
taosArrayPush(pList, &c);
} else if (nodeType(pNode->pExpr) == QUERY_NODE_VALUE) {
SValueNode* pValNode = (SValueNode*) pNode->pExpr;
SColumn c = {0};
SValueNode* pValNode = (SValueNode*)pNode->pExpr;
SColumn c = {0};
c.slotId = pNode->slotId;
c.colId = pNode->slotId;
c.type = pValNode->node.type;
c.bytes = pValNode->node.resType.bytes;
c.scale = pValNode->node.resType.scale;
c.colId = pNode->slotId;
c.type = pValNode->node.type;
c.bytes = pValNode->node.resType.bytes;
c.scale = pValNode->node.resType.scale;
c.precision = pValNode->node.resType.precision;
taosArrayPush(pList, &c);
@ -6906,8 +6925,10 @@ tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle*
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
#if 0
return tsdbQueryTables(pHandle->reader, &cond, pTableGroupInfo, queryId, taskId);
#endif
return tsdbQueryTables(pHandle->vnode, &cond, pTableGroupInfo, queryId, taskId);
_error:
terrno = code;
@ -6925,7 +6946,8 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
(*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo);
(*pTaskInfo)->pRoot =
createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId, &(*pTaskInfo)->tableqinfoGroupInfo);
if (NULL == (*pTaskInfo)->pRoot) {
code = terrno;
goto _complete;
@ -7263,8 +7285,8 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOf
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyBasicOperatorInfo,
NULL, NULL, NULL);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyBasicOperatorInfo, NULL, NULL, NULL);
int32_t code = appendDownstream(pOperator, pDownstream, numOfDownstream);
return pOperator;
View File
@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <common/ttime.h>
#include "common/ttime.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
@ -21,7 +21,7 @@
#include "querynodes.h"
#include "tglobal.h"
#include "tname.h"
#include "vnode.h"
#include "systable.h"
#include "tdatablock.h"
#include "tmsg.h"
@ -36,6 +36,9 @@
#define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN)
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo);
int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName);
void switchCtxOrder(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
for (int32_t i = 0; i < numOfOutput; ++i) {
SWITCH_ORDER(pCtx[i].order);
@ -65,21 +68,6 @@ static void setupQueryRangeForReverseScan(STableScanInfo* pTableScanInfo) {
#endif
}
// relocated the column data according to the slotId
static void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
int32_t numOfCols = pBlock->info.numOfCols;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* p = taosArrayGet(pCols, i);
SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i);
if (!pmInfo->output) {
continue;
}
ASSERT(pmInfo->colId == p->info.colId);
taosArraySet(pBlock->pDataBlock, pmInfo->targetSlotId, p);
}
}
static void getNextTimeWindow(SInterval* pInterval, STimeWindow* tw, int32_t order) {
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order);
if (pInterval->intervalUnit != 'n' && pInterval->intervalUnit != 'y') {
@ -244,11 +232,6 @@ int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo,
}
relocateColumnData(pBlock, pTableScanInfo->pColMatchInfo, pCols);
// reset the block to be 0 by default, this blockId is assigned by physical plan and is used by direct upstream
// operator.
pBlock->info.blockId = 0;
doFilter(pTableScanInfo->pFilterNode, pBlock);
if (pBlock->info.rows == 0) {
pCost->filterOutBlocks += 1;
@ -788,7 +771,12 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) {
continue;
}
colDataAppend(pDest, numOfRow, colDataGetData(pSrc, j), false);
if (colDataIsNull_s(pSrc, j)) {
colDataAppendNULL(pDest, numOfRow);
} else {
colDataAppend(pDest, numOfRow, colDataGetData(pSrc, j), false);
}
numOfRow += 1;
}
} else {
@ -802,6 +790,67 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
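// build an empty block whose 10 columns mirror the user_tables row layout filled in below: table name,
// database name, create time, number of columns, super-table name, uid, vgroup id, ttl, comment and table type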
static SSDataBlock* buildSysTableMetaBlock() {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
pBlock->info.numOfCols = 10;
pBlock->info.hasVarCol = true;
pBlock->pDataBlock = taosArrayInit(pBlock->info.numOfCols, sizeof(SColumnInfoData));
SColumnInfoData colInfoData = {0};
colInfoData.info.colId = 1;
colInfoData.info.type = TSDB_DATA_TYPE_VARCHAR;
colInfoData.info.bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 2;
colInfoData.info.type = TSDB_DATA_TYPE_VARCHAR;
colInfoData.info.bytes = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 3;
colInfoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
colInfoData.info.bytes = 8;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 4;
colInfoData.info.type = TSDB_DATA_TYPE_INT;
colInfoData.info.bytes = 4;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 5;
colInfoData.info.type = TSDB_DATA_TYPE_VARCHAR;
colInfoData.info.bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 6;
colInfoData.info.type = TSDB_DATA_TYPE_BIGINT;
colInfoData.info.bytes = 8;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 7;
colInfoData.info.type = TSDB_DATA_TYPE_INT;
colInfoData.info.bytes = 4;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 8;
colInfoData.info.type = TSDB_DATA_TYPE_INT;
colInfoData.info.bytes = 4;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 9;
colInfoData.info.type = TSDB_DATA_TYPE_VARCHAR;
colInfoData.info.bytes = 512 + VARSTR_HEADER_SIZE;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
colInfoData.info.colId = 10;
colInfoData.info.type = TSDB_DATA_TYPE_VARCHAR;
colInfoData.info.bytes = 20 + VARSTR_HEADER_SIZE;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
return pBlock;
}
static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator, bool* newgroup) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -810,115 +859,135 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator, bool* newgroup) {
// retrieve local table list info from vnode
const char* name = tNameGetTableName(&pInfo->name);
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
}
// the retrieve is executed on the mnode, so return the tables that belong to the information schema database.
if (pInfo->readHandle.mnd != NULL) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
blockDataCleanup(pInfo->pRes);
buildSysDbTableInfo(pInfo);
int32_t tableNameSlotId = 1;
SColumnInfoData* pTableNameCol = taosArrayGet(pInfo->pRes->pDataBlock, tableNameSlotId);
doFilterResult(pInfo);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
int32_t numOfRows = 0;
pOperator->status = OP_EXEC_DONE;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else {
if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
}
const char* db = NULL;
int32_t vgId = 0;
vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
blockDataCleanup(pInfo->pRes);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&sn, db, T_NAME_ACCT|T_NAME_DB);
int32_t numOfRows = 0;
tNameGetDbName(&sn, varDataVal(dbname));
varDataSetLen(dbname, strlen(varDataVal(dbname)));
const char* db = NULL;
int32_t vgId = 0;
vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
char n[TSDB_TABLE_NAME_LEN] = {0};
while (metaTbCursorNext(pInfo->pCur) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
colDataAppend(pTableNameCol, numOfRows, n, false);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB);
int32_t tableType = pInfo->pCur->mr.me.type;
tNameGetDbName(&sn, varDataVal(dbname));
varDataSetLen(dbname, strlen(varDataVal(dbname)));
// database name
SColumnInfoData* pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 0);
colDataAppend(pColInfoData, numOfRows, dbname, false);
SSDataBlock* p = buildSysTableMetaBlock();
blockDataEnsureCapacity(p, pInfo->capacity);
// vgId
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 6);
colDataAppend(pColInfoData, numOfRows, (char*) &vgId, false);
char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
while (metaTbCursorNext(pInfo->pCur) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
// table comment
// todo: set the correct comment
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 8);
colDataAppendNULL(pColInfoData, numOfRows);
// table name
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
colDataAppend(pColInfoData, numOfRows, n, false);
char str[256] = {0};
if (tableType == TSDB_CHILD_TABLE) {
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
metaGetTableEntryByUid(&mr, pInfo->pCur->mr.me.ctbEntry.suid);
// database name
pColInfoData = taosArrayGet(p->pDataBlock, 1);
colDataAppend(pColInfoData, numOfRows, dbname, false);
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*) &mr.me.stbEntry.schema.nCols, false);
// vgId
pColInfoData = taosArrayGet(p->pDataBlock, 6);
colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false);
// create time
int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime;
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*) &ts, false);
// super table name
STR_TO_VARSTR(str, mr.me.name);
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 4);
colDataAppend(pColInfoData, numOfRows, str, false);
metaReaderClear(&mr);
// uid
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.ctbEntry.ttlDays, false);
STR_TO_VARSTR(str, "CHILD_TABLE");
} else if (tableType == TSDB_NORMAL_TABLE) {
// create time
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.ntbEntry.ctime, false);
// number of columns
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.ntbEntry.schema.nCols, false);
// super table name
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 4);
// table comment
// todo: set the correct comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
colDataAppendNULL(pColInfoData, numOfRows);
// uid
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.uid, false);
char str[256] = {0};
int32_t tableType = pInfo->pCur->mr.me.type;
if (tableType == TSDB_CHILD_TABLE) {
// create time
int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime;
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&ts, false);
// ttl
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*) &pInfo->pCur->mr.me.ntbEntry.ttlDays, false);
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
metaGetTableEntryByUid(&mr, pInfo->pCur->mr.me.ctbEntry.suid);
STR_TO_VARSTR(str, "NORMAL_TABLE");
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schema.nCols, false);
// super table name
STR_TO_VARSTR(str, mr.me.name);
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppend(pColInfoData, numOfRows, str, false);
metaReaderClear(&mr);
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ctbEntry.ttlDays, false);
STR_TO_VARSTR(str, "CHILD_TABLE");
} else if (tableType == TSDB_NORMAL_TABLE) {
// create time
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false);
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schema.nCols, false);
// super table name
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppendNULL(pColInfoData, numOfRows);
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ttlDays, false);
STR_TO_VARSTR(str, "NORMAL_TABLE");
}
pColInfoData = taosArrayGet(p->pDataBlock, 9);
colDataAppend(pColInfoData, numOfRows, str, false);
if (++numOfRows >= pInfo->capacity) {
break;
}
}
pColInfoData = taosArrayGet(pInfo->pRes->pDataBlock, 9);
colDataAppend(pColInfoData, numOfRows, str, false);
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
if (++numOfRows >= pInfo->capacity) {
break;
}
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock);
doFilterResult(pInfo);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
pInfo->loadInfo.totalRows += numOfRows;
pInfo->pRes->info.rows = numOfRows;
// pInfo->elapsedTime;
// pInfo->totalBytes;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -988,6 +1057,67 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator, bool* newgroup) {
}
}
int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo) {
SSDataBlock* p = buildSysTableMetaBlock();
blockDataEnsureCapacity(p, pInfo->capacity);
size_t size = 0;
const SSysTableMeta* pSysDbTableMeta = NULL;
getInfosDbMeta(&pSysDbTableMeta, &size);
p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB);
getPerfDbMeta(&pSysDbTableMeta, &size);
p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB);
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock);
// blockDataDestroy(p); todo handle memory leak
pInfo->pRes->info.rows = p->info.rows;
return p->info.rows;
}
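// append one row per entry of pSysDbTableMeta to block p: the table name, owning database name and column
// count are filled in, the type column is set to "SYSTEM_TABLE", and the remaining columns are left NULL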
int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size, const char* dbName) {
char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t numOfRows = p->info.rows;
for (int32_t i = 0; i < size; ++i) {
const SSysTableMeta* pm = &pSysDbTableMeta[i];
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
STR_TO_VARSTR(n, pm->name);
colDataAppend(pColInfoData, numOfRows, n, false);
// database name
STR_TO_VARSTR(n, dbName);
pColInfoData = taosArrayGet(p->pDataBlock, 1);
colDataAppend(pColInfoData, numOfRows, n, false);
// create time
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppendNULL(pColInfoData, numOfRows);
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&pm->colNum, false);
for (int32_t j = 4; j <= 8; ++j) {
pColInfoData = taosArrayGet(p->pDataBlock, j);
colDataAppendNULL(pColInfoData, numOfRows);
}
STR_TO_VARSTR(n, "SYSTEM_TABLE");
pColInfoData = taosArrayGet(p->pDataBlock, 9);
colDataAppend(pColInfoData, numOfRows, n, false);
numOfRows += 1;
}
return numOfRows;
}
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSDataBlock* pResBlock, const SName* pName,
SNode* pCondition, SEpSet epset, SArray* colList,
SExecTaskInfo* pTaskInfo, bool showRewrite, int32_t accountId) {
View File
@ -68,6 +68,11 @@ bool getTopBotFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv);
int32_t topFunction(SqlFunctionCtx *pCtx);
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getSpreadFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool spreadFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t spreadFunction(SqlFunctionCtx* pCtx);
int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
#ifdef __cplusplus
}
#endif
View File
@ -212,7 +212,16 @@ static int32_t translateBottom(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
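// spread accepts exactly one parameter, which must be a numeric or timestamp column, and its result type is
// always a double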
static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// todo
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType) { .bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE };
return TSDB_CODE_SUCCESS;
}
@ -431,435 +440,541 @@ static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{.name = "count",
.type = FUNCTION_TYPE_COUNT,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateCount,
.dataRequiredFunc = countDataRequired,
.getEnvFunc = getCountFuncEnv,
.initFunc = functionSetup,
.processFunc = countFunction,
.finalizeFunc = functionFinalize},
{.name = "sum",
.type = FUNCTION_TYPE_SUM,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateSum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getSumFuncEnv,
.initFunc = functionSetup,
.processFunc = sumFunction,
.finalizeFunc = functionFinalize},
{.name = "min",
.type = FUNCTION_TYPE_MIN,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateInOutNum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minFunctionSetup,
.processFunc = minFunction,
.finalizeFunc = functionFinalize},
{.name = "max",
.type = FUNCTION_TYPE_MAX,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateInOutNum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize},
{.name = "stddev",
.type = FUNCTION_TYPE_STDDEV,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = getStddevFuncEnv,
.initFunc = stddevFunctionSetup,
.processFunc = stddevFunction,
.finalizeFunc = stddevFinalize},
{.name = "avg",
.type = FUNCTION_TYPE_AVG,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = getAvgFuncEnv,
.initFunc = avgFunctionSetup,
.processFunc = avgFunction,
.finalizeFunc = avgFinalize},
{.name = "percentile",
.type = FUNCTION_TYPE_PERCENTILE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translatePercentile,
.getEnvFunc = getPercentileFuncEnv,
.initFunc = percentileFunctionSetup,
.processFunc = percentileFunction,
.finalizeFunc = percentileFinalize},
{.name = "apercentile",
.type = FUNCTION_TYPE_APERCENTILE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentile,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize},
{
.name = "top",
.type = FUNCTION_TYPE_TOP,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateTop,
.getEnvFunc = getTopBotFuncEnv,
.initFunc = functionSetup,
.processFunc = topFunction,
.finalizeFunc = topBotFinalize,
},
{.name = "bottom",
.type = FUNCTION_TYPE_BOTTOM,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateBottom,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize},
{.name = "spread",
.type = FUNCTION_TYPE_SPREAD,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateSpread,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize},
{.name = "last_row",
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateLastRow,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize},
{.name = "first",
.type = FUNCTION_TYPE_FIRST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = firstFunction,
.finalizeFunc = functionFinalize},
{.name = "last",
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
.finalizeFunc = functionFinalize},
{.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,
.finalizeFunc = functionFinalize},
{.name = "abs",
.type = FUNCTION_TYPE_ABS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = absFunction,
.finalizeFunc = NULL},
{.name = "log",
.type = FUNCTION_TYPE_LOG,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateIn2NumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = logFunction,
.finalizeFunc = NULL},
{.name = "pow",
.type = FUNCTION_TYPE_POW,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateIn2NumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = powFunction,
.finalizeFunc = NULL},
{.name = "sqrt",
.type = FUNCTION_TYPE_SQRT,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = sqrtFunction,
.finalizeFunc = NULL},
{.name = "ceil",
.type = FUNCTION_TYPE_CEIL,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = ceilFunction,
.finalizeFunc = NULL},
{.name = "floor",
.type = FUNCTION_TYPE_FLOOR,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = floorFunction,
.finalizeFunc = NULL},
{.name = "round",
.type = FUNCTION_TYPE_ROUND,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = roundFunction,
.finalizeFunc = NULL},
{.name = "sin",
.type = FUNCTION_TYPE_SIN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = sinFunction,
.finalizeFunc = NULL},
{.name = "cos",
.type = FUNCTION_TYPE_COS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = cosFunction,
.finalizeFunc = NULL},
{.name = "tan",
.type = FUNCTION_TYPE_TAN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = tanFunction,
.finalizeFunc = NULL},
{.name = "asin",
.type = FUNCTION_TYPE_ASIN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = asinFunction,
.finalizeFunc = NULL},
{.name = "acos",
.type = FUNCTION_TYPE_ACOS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = acosFunction,
.finalizeFunc = NULL},
{.name = "atan",
.type = FUNCTION_TYPE_ATAN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = atanFunction,
.finalizeFunc = NULL},
{.name = "length",
.type = FUNCTION_TYPE_LENGTH,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateLength,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = lengthFunction,
.finalizeFunc = NULL},
{.name = "char_length",
.type = FUNCTION_TYPE_CHAR_LENGTH,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateLength,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = charLengthFunction,
.finalizeFunc = NULL},
{.name = "concat",
.type = FUNCTION_TYPE_CONCAT,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateConcat,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = concatFunction,
.finalizeFunc = NULL},
{.name = "concat_ws",
.type = FUNCTION_TYPE_CONCAT_WS,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateConcatWs,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = concatWsFunction,
.finalizeFunc = NULL},
{.name = "lower",
.type = FUNCTION_TYPE_LOWER,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = lowerFunction,
.finalizeFunc = NULL},
{.name = "upper",
.type = FUNCTION_TYPE_UPPER,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = upperFunction,
.finalizeFunc = NULL},
{.name = "ltrim",
.type = FUNCTION_TYPE_LTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = ltrimFunction,
.finalizeFunc = NULL},
{.name = "rtrim",
.type = FUNCTION_TYPE_RTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = rtrimFunction,
.finalizeFunc = NULL},
{.name = "substr",
.type = FUNCTION_TYPE_SUBSTR,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateSubstr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = substrFunction,
.finalizeFunc = NULL},
{.name = "cast",
.type = FUNCTION_TYPE_CAST,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateCast,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = castFunction,
.finalizeFunc = NULL},
{.name = "to_iso8601",
.type = FUNCTION_TYPE_TO_ISO8601,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToIso8601,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toISO8601Function,
.finalizeFunc = NULL},
{.name = "to_unixtimestamp",
.type = FUNCTION_TYPE_TO_UNIXTIMESTAMP,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToUnixtimestamp,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toUnixtimestampFunction,
.finalizeFunc = NULL},
{.name = "timetruncate",
.type = FUNCTION_TYPE_TIMETRUNCATE,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimeTruncate,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timeTruncateFunction,
.finalizeFunc = NULL},
{.name = "timediff",
.type = FUNCTION_TYPE_TIMEDIFF,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimeDiff,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timeDiffFunction,
.finalizeFunc = NULL},
{.name = "now",
.type = FUNCTION_TYPE_NOW,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = nowFunction,
.finalizeFunc = NULL},
{.name = "today",
.type = FUNCTION_TYPE_TODAY,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = todayFunction,
.finalizeFunc = NULL},
{.name = "timezone",
.type = FUNCTION_TYPE_TIMEZONE,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimezone,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timezoneFunction,
.finalizeFunc = NULL},
{.name = "_rowts",
.type = FUNCTION_TYPE_ROWTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = NULL,
.finalizeFunc = NULL},
{.name = "tbname",
.type = FUNCTION_TYPE_TBNAME,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC,
.translateFunc = translateTbnameColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = NULL,
.finalizeFunc = NULL},
{.name = "_qstartts",
.type = FUNCTION_TYPE_QSTARTTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = qStartTsFunction,
.finalizeFunc = NULL},
{.name = "_qendts",
.type = FUNCTION_TYPE_QENDTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = qEndTsFunction,
.finalizeFunc = NULL},
{.name = "_wstartts",
.type = FUNCTION_TYPE_WSTARTTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winStartTsFunction,
.finalizeFunc = NULL},
{.name = "_wendts",
.type = FUNCTION_TYPE_WENDTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winEndTsFunction,
.finalizeFunc = NULL},
{.name = "_wduration",
.type = FUNCTION_TYPE_WDURATION,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateWduration,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winDurFunction,
.finalizeFunc = NULL},
{.name = "to_json",
.type = FUNCTION_TYPE_TO_JSON,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToJson,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toJsonFunction,
.finalizeFunc = NULL}};
{
.name = "count",
.type = FUNCTION_TYPE_COUNT,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateCount,
.dataRequiredFunc = countDataRequired,
.getEnvFunc = getCountFuncEnv,
.initFunc = functionSetup,
.processFunc = countFunction,
.finalizeFunc = functionFinalize
},
{
.name = "sum",
.type = FUNCTION_TYPE_SUM,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateSum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getSumFuncEnv,
.initFunc = functionSetup,
.processFunc = sumFunction,
.finalizeFunc = functionFinalize
},
{
.name = "min",
.type = FUNCTION_TYPE_MIN,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateInOutNum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minFunctionSetup,
.processFunc = minFunction,
.finalizeFunc = functionFinalize
},
{
.name = "max",
.type = FUNCTION_TYPE_MAX,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateInOutNum,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize
},
{
.name = "stddev",
.type = FUNCTION_TYPE_STDDEV,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = getStddevFuncEnv,
.initFunc = stddevFunctionSetup,
.processFunc = stddevFunction,
.finalizeFunc = stddevFinalize
},
{
.name = "avg",
.type = FUNCTION_TYPE_AVG,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = getAvgFuncEnv,
.initFunc = avgFunctionSetup,
.processFunc = avgFunction,
.finalizeFunc = avgFinalize
},
{
.name = "percentile",
.type = FUNCTION_TYPE_PERCENTILE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translatePercentile,
.getEnvFunc = getPercentileFuncEnv,
.initFunc = percentileFunctionSetup,
.processFunc = percentileFunction,
.finalizeFunc = percentileFinalize
},
{
.name = "apercentile",
.type = FUNCTION_TYPE_APERCENTILE,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentile,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize
},
{
.name = "top",
.type = FUNCTION_TYPE_TOP,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateTop,
.getEnvFunc = getTopBotFuncEnv,
.initFunc = functionSetup,
.processFunc = topFunction,
.finalizeFunc = topBotFinalize,
},
{
.name = "bottom",
.type = FUNCTION_TYPE_BOTTOM,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateBottom,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize
},
{
.name = "spread",
.type = FUNCTION_TYPE_SPREAD,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateSpread,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getSpreadFuncEnv,
.initFunc = spreadFunctionSetup,
.processFunc = spreadFunction,
.finalizeFunc = spreadFinalize
},
{
.name = "last_row",
.type = FUNCTION_TYPE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateLastRow,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = maxFunctionSetup,
.processFunc = maxFunction,
.finalizeFunc = functionFinalize
},
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = firstFunction,
.finalizeFunc = functionFinalize
},
{
.name = "last",
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
.finalizeFunc = functionFinalize
},
{
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,
.finalizeFunc = functionFinalize
},
{
.name = "abs",
.type = FUNCTION_TYPE_ABS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = absFunction,
.finalizeFunc = NULL
},
{
.name = "log",
.type = FUNCTION_TYPE_LOG,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateIn2NumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = logFunction,
.finalizeFunc = NULL
},
{
.name = "pow",
.type = FUNCTION_TYPE_POW,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateIn2NumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = powFunction,
.finalizeFunc = NULL
},
{
.name = "sqrt",
.type = FUNCTION_TYPE_SQRT,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = sqrtFunction,
.finalizeFunc = NULL
},
{
.name = "ceil",
.type = FUNCTION_TYPE_CEIL,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = ceilFunction,
.finalizeFunc = NULL
},
{
.name = "floor",
.type = FUNCTION_TYPE_FLOOR,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = floorFunction,
.finalizeFunc = NULL
},
{
.name = "round",
.type = FUNCTION_TYPE_ROUND,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInOutNum,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = roundFunction,
.finalizeFunc = NULL
},
{
.name = "sin",
.type = FUNCTION_TYPE_SIN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = sinFunction,
.finalizeFunc = NULL
},
{
.name = "cos",
.type = FUNCTION_TYPE_COS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = cosFunction,
.finalizeFunc = NULL
},
{
.name = "tan",
.type = FUNCTION_TYPE_TAN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = tanFunction,
.finalizeFunc = NULL
},
{
.name = "asin",
.type = FUNCTION_TYPE_ASIN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = asinFunction,
.finalizeFunc = NULL
},
{
.name = "acos",
.type = FUNCTION_TYPE_ACOS,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = acosFunction,
.finalizeFunc = NULL
},
{
.name = "atan",
.type = FUNCTION_TYPE_ATAN,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateInNumOutDou,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = atanFunction,
.finalizeFunc = NULL
},
{
.name = "length",
.type = FUNCTION_TYPE_LENGTH,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateLength,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = lengthFunction,
.finalizeFunc = NULL
},
{
.name = "char_length",
.type = FUNCTION_TYPE_CHAR_LENGTH,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateLength,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = charLengthFunction,
.finalizeFunc = NULL
},
{
.name = "concat",
.type = FUNCTION_TYPE_CONCAT,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateConcat,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = concatFunction,
.finalizeFunc = NULL
},
{
.name = "concat_ws",
.type = FUNCTION_TYPE_CONCAT_WS,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateConcatWs,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = concatWsFunction,
.finalizeFunc = NULL
},
{
.name = "lower",
.type = FUNCTION_TYPE_LOWER,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = lowerFunction,
.finalizeFunc = NULL
},
{
.name = "upper",
.type = FUNCTION_TYPE_UPPER,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = upperFunction,
.finalizeFunc = NULL
},
{
.name = "ltrim",
.type = FUNCTION_TYPE_LTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = ltrimFunction,
.finalizeFunc = NULL
},
{
.name = "rtrim",
.type = FUNCTION_TYPE_RTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateInOutStr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = rtrimFunction,
.finalizeFunc = NULL
},
{
.name = "substr",
.type = FUNCTION_TYPE_SUBSTR,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
.translateFunc = translateSubstr,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = substrFunction,
.finalizeFunc = NULL
},
{
.name = "cast",
.type = FUNCTION_TYPE_CAST,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateCast,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = castFunction,
.finalizeFunc = NULL
},
{
.name = "to_iso8601",
.type = FUNCTION_TYPE_TO_ISO8601,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToIso8601,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toISO8601Function,
.finalizeFunc = NULL
},
{
.name = "to_unixtimestamp",
.type = FUNCTION_TYPE_TO_UNIXTIMESTAMP,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToUnixtimestamp,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toUnixtimestampFunction,
.finalizeFunc = NULL
},
{
.name = "timetruncate",
.type = FUNCTION_TYPE_TIMETRUNCATE,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimeTruncate,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timeTruncateFunction,
.finalizeFunc = NULL
},
{
.name = "timediff",
.type = FUNCTION_TYPE_TIMEDIFF,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimeDiff,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timeDiffFunction,
.finalizeFunc = NULL
},
{
.name = "now",
.type = FUNCTION_TYPE_NOW,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = nowFunction,
.finalizeFunc = NULL
},
{
.name = "today",
.type = FUNCTION_TYPE_TODAY,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_DATETIME_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = todayFunction,
.finalizeFunc = NULL
},
{
.name = "timezone",
.type = FUNCTION_TYPE_TIMEZONE,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateTimezone,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = timezoneFunction,
.finalizeFunc = NULL
},
{
.name = "_rowts",
.type = FUNCTION_TYPE_ROWTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = NULL,
.finalizeFunc = NULL
},
{
.name = "tbname",
.type = FUNCTION_TYPE_TBNAME,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC,
.translateFunc = translateTbnameColumn,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = NULL,
.finalizeFunc = NULL
},
{
.name = "_qstartts",
.type = FUNCTION_TYPE_QSTARTTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = qStartTsFunction,
.finalizeFunc = NULL
},
{
.name = "_qendts",
.type = FUNCTION_TYPE_QENDTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = qEndTsFunction,
.finalizeFunc = NULL
},
{
.name = "_wstartts",
.type = FUNCTION_TYPE_WSTARTTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winStartTsFunction,
.finalizeFunc = NULL
},
{
.name = "_wendts",
.type = FUNCTION_TYPE_WENDTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winEndTsFunction,
.finalizeFunc = NULL
},
{
.name = "_wduration",
.type = FUNCTION_TYPE_WDURATION,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_WINDOW_PC_FUNC,
.translateFunc = translateWduration,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
.sprocessFunc = winDurFunction,
.finalizeFunc = NULL
},
{
.name = "to_json",
.type = FUNCTION_TYPE_TO_JSON,
.classification = FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateToJson,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = toJsonFunction,
.finalizeFunc = NULL
}
};
const int32_t funcMgtBuiltinsNum = (sizeof(funcMgtBuiltins) / sizeof(SBuiltinFuncDefinition));
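For orientation: each entry in the table above binds a builtin name to its translate/getEnv/init/process/finalize hooks, with scalar functions supplying sprocessFunc instead of an aggregate processFunc. A minimal sketch of resolving a definition by name from this table; findBuiltinByName is a hypothetical helper, assuming <string.h> and the header declaring SBuiltinFuncDefinition are in scope:

static const SBuiltinFuncDefinition* findBuiltinByName(const char* name) {
  for (int32_t i = 0; i < funcMgtBuiltinsNum; ++i) {
    if (0 == strcmp(funcMgtBuiltins[i].name, name)) {
      return &funcMgtBuiltins[i];  // e.g. "last", "abs", "_wstartts"
    }
  }
  return NULL;  // name not registered
}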

File diff suppressed because it is too large

View File

@@ -22,6 +22,7 @@
#include "scalar.h"
#include "tglobal.h"
#include "ttime.h"
#include "systable.h"
#define generateDealNodeErrMsg(pCxt, code, ...) \
(pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, code, ##__VA_ARGS__) ? DEAL_RES_ERROR : DEAL_RES_ERROR)
@@ -780,17 +781,17 @@ static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRea
SArray* vgroupList = NULL;
if ('\0' != pRealTable->qualDbName[0]) {
// todo release after mnode can be processed
// if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
// }
if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
}
} else {
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
// todo release after mnode can be processed
// if (TSDB_CODE_SUCCESS == code) {
// code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
// }
if (TSDB_CODE_SUCCESS == code) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
if (TSDB_CODE_SUCCESS == code) {
code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList);

View File

@@ -18,6 +18,7 @@
#include "catalog.h"
#include "functionMgt.h"
#include "tglobal.h"
#include "systable.h"
typedef struct SSlotIdInfo {
int16_t slotId;

View File

@@ -17,6 +17,7 @@
#include "queryInt.h"
#include "query.h"
#include "trpc.h"
#include "systable.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-truncation"

View File

@@ -34,6 +34,7 @@ void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete)
rsp->compressed = input->compressed;
rsp->compLen = htonl(len);
rsp->numOfRows = htonl(input->numOfRows);
rsp->numOfCols = htonl(input->numOfCols);
}
void qwFreeFetchRsp(void *msg) {
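For orientation: numOfCols now travels in network byte order like the other counters in the fetch response, so whatever reads the message must undo the conversion. A minimal sketch under that assumption; decodeWireCount is a name used only for illustration:

#include <arpa/inet.h>  /* ntohl(); winsock2.h provides it on Windows */
#include <stdint.h>

static int32_t decodeWireCount(uint32_t wireValue) {
  return (int32_t)ntohl(wireValue);  /* mirror of the htonl() applied when building the rsp */
}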

View File

@@ -15,7 +15,15 @@ typedef int16_t (*_len_fn)(char *, int32_t);
/** Math functions **/
static double tlog(double v, double base) {
return log(v) / log(base);
double a = log(v);
double b = log(base);
if (isnan(a) || isinf(a)) {
return a;
} else if (isnan(b) || isinf(b)) {
return b;
} else {
return a / b;
}
}
int32_t absFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
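A quick, self-contained illustration of why the reworked tlog() checks its intermediate results before dividing; the values follow C99 log() behaviour (log(-1) is NaN, log(0) is -inf, log(1) is 0):

#include <math.h>
#include <stdio.h>

int main(void) {
  printf("%f\n", log(-1.0));            /* NaN: tlog() now returns it directly                */
  printf("%f\n", log(0.0));             /* -inf: likewise returned as-is                      */
  printf("%f\n", log(10.0) / log(1.0)); /* finite / 0.0 -> +inf; callers map NaN/Inf to NULL  */
  return 0;
}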
@@ -160,22 +168,64 @@ static int32_t doScalarFunctionUnique2(SScalarParam *pInput, int32_t inputNum, S
}
double *out = (double *)pOutputData->pData;
double result;
for (int32_t i = 0; i < pInput->numOfRows; ++i) {
if (colDataIsNull_s(pInputData[0], i) ||
colDataIsNull_s(pInputData[1], 0)) {
colDataAppendNULL(pOutputData, i);
continue;
int32_t numOfRows = TMAX(pInput[0].numOfRows, pInput[1].numOfRows);
if (pInput[0].numOfRows == pInput[1].numOfRows) {
for (int32_t i = 0; i < numOfRows; ++i) {
if (colDataIsNull_s(pInputData[0], i) ||
colDataIsNull_s(pInputData[1], i)) {
colDataAppendNULL(pOutputData, i);
continue;
}
result = valFn(getValueFn[0](pInputData[0]->pData, i), getValueFn[1](pInputData[1]->pData, i));
if (isinf(result) || isnan(result)) {
colDataAppendNULL(pOutputData, i);
} else {
out[i] = result;
}
}
double result = valFn(getValueFn[0](pInputData[0]->pData, i), getValueFn[1](pInputData[1]->pData, 0));
if (isinf(result) || isnan(result)) {
colDataAppendNULL(pOutputData, i);
} else if (pInput[0].numOfRows == 1) { //left operand is constant
if (colDataIsNull_s(pInputData[0], 0)) {
colDataAppendNNULL(pOutputData, 0, pInput[1].numOfRows);
} else {
out[i] = result;
for (int32_t i = 0; i < numOfRows; ++i) {
if (colDataIsNull_s(pInputData[1], i)) {
colDataAppendNULL(pOutputData, i);
continue;
}
result = valFn(getValueFn[0](pInputData[0]->pData, 0), getValueFn[1](pInputData[1]->pData, i));
if (isinf(result) || isnan(result)) {
colDataAppendNULL(pOutputData, i);
continue;
}
out[i] = result;
}
}
} else if (pInput[1].numOfRows == 1) {
if (colDataIsNull_s(pInputData[1], 0)) {
colDataAppendNNULL(pOutputData, 0, pInput[0].numOfRows);
} else {
for (int32_t i = 0; i < numOfRows; ++i) {
if (colDataIsNull_s(pInputData[0], i)) {
colDataAppendNULL(pOutputData, i);
continue;
}
result = valFn(getValueFn[0](pInputData[0]->pData, i), getValueFn[1](pInputData[1]->pData, 0));
if (isinf(result) || isnan(result)) {
colDataAppendNULL(pOutputData, i);
continue;
}
out[i] = result;
}
}
}
pOutput->numOfRows = pInput->numOfRows;
pOutput->numOfRows = numOfRows;
return TSDB_CODE_SUCCESS;
}
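The restructured loop implements scalar broadcasting for two-operand math functions: equal-length operands are combined row by row, a one-row operand is reused against every row of the other side, and the output is sized to the larger input. A minimal sketch of that rule over plain arrays (illustration only; operands are assumed to have either equal length or length 1, as in the engine):

#include <math.h>
#include <stddef.h>

static void applyLogBroadcast(const double* v, size_t nv, const double* base, size_t nb, double* out) {
  size_t n = (nv > nb) ? nv : nb;        /* result has max(numOfRows) rows             */
  for (size_t i = 0; i < n; ++i) {
    double x = v[nv == 1 ? 0 : i];       /* a one-row operand is broadcast             */
    double b = base[nb == 1 ? 0 : i];
    out[i] = log(x) / log(b);            /* NaN/Inf results become NULL in the engine  */
  }
}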

View File

@@ -158,8 +158,6 @@ int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, in
//
} else if (pTask->sinkType == TASK_SINK__FETCH) {
//
} else if (pTask->sinkType == TASK_SINK__SHOW) {
blockDebugShowData(pRes);
} else {
ASSERT(pTask->sinkType == TASK_SINK__NONE);
}
@@ -280,8 +278,6 @@ int32_t tEncodeSStreamTask(SCoder* pEncoder, const SStreamTask* pTask) {
if (tEncodeI64(pEncoder, pTask->smaSink.smaId) < 0) return -1;
} else if (pTask->sinkType == TASK_SINK__FETCH) {
if (tEncodeI8(pEncoder, pTask->fetchSink.reserved) < 0) return -1;
} else if (pTask->sinkType == TASK_SINK__SHOW) {
if (tEncodeI8(pEncoder, pTask->showSink.reserved) < 0) return -1;
} else {
ASSERT(pTask->sinkType == TASK_SINK__NONE);
}
@@ -326,8 +322,6 @@ int32_t tDecodeSStreamTask(SCoder* pDecoder, SStreamTask* pTask) {
if (tDecodeI64(pDecoder, &pTask->smaSink.smaId) < 0) return -1;
} else if (pTask->sinkType == TASK_SINK__FETCH) {
if (tDecodeI8(pDecoder, &pTask->fetchSink.reserved) < 0) return -1;
} else if (pTask->sinkType == TASK_SINK__SHOW) {
if (tDecodeI8(pDecoder, &pTask->showSink.reserved) < 0) return -1;
} else {
ASSERT(pTask->sinkType == TASK_SINK__NONE);
}

View File

@@ -54,15 +54,28 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
if (precision != TSDB_TIME_PRECISION_MILLI) {
val = convertTimePrecision(interval, precision, TSDB_TIME_PRECISION_MILLI);
}
if (val < MIN_INTERVAL) {
val = MIN_INTERVAL;
} else if (val > MAX_INTERVAL) {
if (val <= 0 || val > MAX_INTERVAL) {
val = MAX_INTERVAL;
} else if (val < MIN_INTERVAL) {
val = MIN_INTERVAL;
}
if (precision != TSDB_TIME_PRECISION_MILLI) {
val = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, precision);
}
val = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, precision);
return val;
}
static int64_t adjustWatermark(int64_t interval, int32_t watermark) {
if (watermark <= 0 || watermark > MAX_NUM_SCALABLE_BF * interval) {
watermark = MAX_NUM_SCALABLE_BF * interval;
} else if (watermark < MIN_NUM_SCALABLE_BF * interval) {
watermark = MIN_NUM_SCALABLE_BF * interval;
}
return watermark;
}
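Taken together, the two helpers guarantee that the bloom-filter count derived later (watermark / interval) stays within the configured bounds. A self-contained sketch of the clamp-then-derive behaviour; the bounds 1 and 100000 below are placeholders, not the real MIN/MAX_NUM_SCALABLE_BF values:

#include <stdint.h>
#include <stdio.h>

static int64_t clampWatermark(int64_t interval, int64_t watermark, int64_t minBf, int64_t maxBf) {
  if (watermark <= 0 || watermark > maxBf * interval) return maxBf * interval;
  if (watermark < minBf * interval) return minBf * interval;
  return watermark;
}

int main(void) {
  int64_t interval = 60000;                             /* one minute, in milliseconds          */
  int64_t wm = clampWatermark(interval, 0, 1, 100000);  /* non-positive watermark snaps to max  */
  printf("watermark=%lld bfSize=%lld\n", (long long)wm, (long long)(wm / interval));
  return 0;
}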
SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark) {
return updateInfoInit(pInterval->interval, pInterval->precision, watermark);
}
@@ -76,14 +89,9 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
pInfo->pTsSBFs = NULL;
pInfo->minTS = -1;
pInfo->interval = adjustInterval(interval, precision);
pInfo->watermark = watermark;
pInfo->watermark = adjustWatermark(pInfo->interval, watermark);
uint64_t bfSize = (uint64_t)(watermark / pInfo->interval);
if (bfSize < MIN_NUM_SCALABLE_BF) {
bfSize = MIN_NUM_SCALABLE_BF;
} else if (bfSize > MAX_NUM_SCALABLE_BF) {
bfSize = MAX_NUM_SCALABLE_BF;
}
uint64_t bfSize = (uint64_t)(pInfo->watermark / pInfo->interval);
pInfo->pTsSBFs = taosArrayInit(bfSize, sizeof(SScalableBf));
if (pInfo->pTsSBFs == NULL) {

View File

@@ -90,11 +90,21 @@ TEST(TD_STREAM_UPDATE_TEST, update) {
}
}
SUpdateInfo *pSU4 = updateInfoInit(-1, TSDB_TIME_PRECISION_MILLI, -1);
GTEST_ASSERT_EQ(pSU4->watermark, 120 * pSU4->interval);
GTEST_ASSERT_EQ(pSU4->interval, MILLISECOND_PER_MINUTE);
SUpdateInfo *pSU5 = updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0);
GTEST_ASSERT_EQ(pSU5->watermark, 120 * pSU4->interval);
GTEST_ASSERT_EQ(pSU5->interval, MILLISECOND_PER_MINUTE);
updateInfoDestroy(pSU);
updateInfoDestroy(pSU1);
updateInfoDestroy(pSU2);
updateInfoDestroy(pSU3);
updateInfoDestroy(pSU4);
updateInfoDestroy(pSU5);
}
int main(int argc, char* argv[]) {

View File

@@ -810,16 +810,17 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
SWorkThrdObj* thrd = (SWorkThrdObj*)taosMemoryCalloc(1, sizeof(SWorkThrdObj));
thrd->quit = false;
srv->pThreadObj[i] = thrd;
thrd->pTransInst = shandle;
srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t));
int fds[2];
if (uv_socketpair(AF_UNIX, SOCK_STREAM, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
uv_os_sock_t fds[2];
if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
goto End;
}
uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
uv_pipe_open(&(srv->pipe[i][0]), fds[1]); // init write
thrd->pTransInst = shandle;
thrd->fd = fds[0];
thrd->pipe = &(srv->pipe[i][1]); // init read
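For context: libuv's uv_socketpair() takes a socket type and protocol (not an address family) and fills a uv_os_sock_t pair, which is what the corrected call reflects. A POSIX-oriented sketch under that assumption; makeSocketPair is a name used only for illustration:

#include <sys/socket.h>  /* SOCK_STREAM */
#include <uv.h>

static int makeSocketPair(uv_os_sock_t fds[2]) {
  /* non-blocking on both ends, as in the server thread setup above */
  return uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
}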

View File

@@ -40,11 +40,11 @@ int wordexp(char *words, wordexp_t *pwordexp, int flags) {
memset(pwordexp->wordPos, 0, 1025);
if (_fullpath(pwordexp->wordPos, words, 1024) == NULL) {
pwordexp->we_wordv[0] = words;
printf("failed to parse relative path:%s to abs path", words);
printf("failed to parse relative path:%s to abs path\n", words);
return -1;
}
printf("parse relative path:%s to abs path:%s", words, pwordexp->wordPos);
printf("parse relative path:%s to abs path:%s\n", words, pwordexp->wordPos);
return 0;
}

View File

@@ -22,20 +22,6 @@
#define W_OK 2
#define R_OK 4
#if defined(_MSDOS)
#define open _open
#endif
#if defined(_WIN32)
extern int openA(const char *, int, ...); /* MsvcLibX ANSI version of open */
extern int openU(const char *, int, ...); /* MsvcLibX UTF-8 version of open */
#if defined(_UTF8_SOURCE) || defined(_BSD_SOURCE) || defined(_GNU_SOURCE)
#define open openU
#else /* _ANSI_SOURCE */
#define open openA
#endif /* defined(_UTF8_SOURCE) */
#endif /* defined(_WIN32) */
#define _SEND_FILE_STEP_ 1000
#else
@@ -228,9 +214,6 @@ int32_t taosDevInoFile(const char *path, int64_t *stDev, int64_t *stIno) {
void autoDelFileListAdd(const char *path) { return; }
TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
#ifdef WINDOWS
return NULL;
#else
int fd = -1;
FILE *fp = NULL;
if (tdFileOptions & TD_FILE_STREAM) {
@@ -263,7 +246,11 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
access |= (tdFileOptions & TD_FILE_APPEND) ? O_APPEND : 0;
access |= (tdFileOptions & TD_FILE_TEXT) ? O_TEXT : 0;
access |= (tdFileOptions & TD_FILE_EXCL) ? O_EXCL : 0;
#ifdef WINDOWS
fd = _open(path, access, _S_IREAD|_S_IWRITE);
#else
fd = open(path, access, S_IRWXU | S_IRWXG | S_IRWXO);
#endif
if (fd == -1) {
return NULL;
}
@@ -286,7 +273,6 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
pFile->fp = fp;
pFile->refId = 0;
return pFile;
#endif
}
int64_t taosCloseFile(TdFilePtr *ppFile) {
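The net effect of the change is one open path per platform behind the same flag word. A stand-alone sketch of that pattern; portableOpen is a name made up for illustration:

#include <fcntl.h>
#include <sys/stat.h>
#ifdef WINDOWS
#include <io.h>          /* _open */
#endif

static int portableOpen(const char *path, int access) {
#ifdef WINDOWS
  return _open(path, access, _S_IREAD | _S_IWRITE);         /* CRT open on Windows   */
#else
  return open(path, access, S_IRWXU | S_IRWXG | S_IRWXO);   /* POSIX open elsewhere  */
#endif
}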

View File

@@ -866,6 +866,16 @@ void taosSetCoreDump(bool enable) {
SysNameInfo taosGetSysNameInfo() {
#ifdef WINDOWS
SysNameInfo info = {0};
DWORD dwVersion = GetVersion();
tstrncpy(info.sysname, getenv("OS"), sizeof(info.sysname));
tstrncpy(info.nodename, getenv("COMPUTERNAME"), sizeof(info.nodename));
sprintf_s(info.release, sizeof(info.release), "%d", dwVersion & 0x0F);
sprintf_s(info.version, sizeof(info.version), "%d", (dwVersion >> 8) & 0x0F);
tstrncpy(info.machine, getenv("PROCESSOR_ARCHITECTURE"), sizeof(info.machine));
return info;
#elif defined(_TD_DARWIN_64)
SysNameInfo info = {0};

View File

@@ -687,13 +687,13 @@ int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) {
const char *filepath = ".env";
if (envFile != NULL && strlen(envFile)>0) {
if (!taosCheckExistFile(envFile)) {
uError("fial to load env file: %s", envFile);
uError("failed to load env file: %s", envFile);
return -1;
}
filepath = envFile;
}else {
if (!taosCheckExistFile(filepath)) {
uInfo("fial to load env file: %s", filepath);
uInfo("failed to load env file: %s", filepath);
return 0;
}
}
@@ -826,7 +826,7 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
if (strncmp(url, "jsonFile", 8) == 0) {
char *filepath = p;
if (!taosCheckExistFile(filepath)) {
uError("fial to load json file: %s", filepath);
uError("failed to load json file: %s", filepath);
return -1;
}
@@ -957,13 +957,13 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl
const char *filepath = ".env";
if (envFile != NULL && strlen(envFile)>0) {
if (!taosCheckExistFile(envFile)) {
uError("fial to load env file: %s", envFile);
uError("failed to load env file: %s", envFile);
return -1;
}
filepath = envFile;
}else {
if (!taosCheckExistFile(filepath)) {
uInfo("fial to load env file: %s", filepath);
uInfo("failed to load env file: %s", filepath);
return 0;
}
}

View File

@@ -215,6 +215,7 @@ sql select count(c1) from stb1
print ====> sql : select count(c1) from stb1
print ====> rows: $data00
if $data00 != 20 then
print expect 20, actual: $data00
return -1
endi

View File

@@ -101,7 +101,7 @@ if $rows != 1 then
endi
#sql select * from information_schema.user_streams
sql select * from information_schema.user_tables
if $rows != 1 then
if $rows != 28 then
return -1
endi
#sql select * from information_schema.user_table_distributed
@@ -199,7 +199,7 @@ if $rows != 1 then
endi
#sql select * from information_schema.user_streams
sql select * from information_schema.user_tables
if $rows != 1 then
if $rows != 28 then
return -1
endi
#sql select * from information_schema.user_table_distributed

View File

@@ -73,17 +73,19 @@ class TDTestCase:
hostname = socket.gethostname()
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135'}
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
clientCfgDict["serverPort"] = serverPort
clientCfgDict["firstEp"] = hostname + ':' + serverPort
clientCfgDict["secondEp"] = hostname + ':' + serverPort
clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
clientCfgDict["fqdn"] = hostname
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':''}
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
updatecfgDict["clientCfg"] = clientCfgDict
updatecfgDict["serverPort"] = serverPort
updatecfgDict["firstEp"] = hostname + ':' + serverPort
updatecfgDict["secondEp"] = hostname + ':' + serverPort
updatecfgDict["fqdn"] = hostname
print ("===================: ", updatecfgDict)
@@ -288,7 +290,7 @@ class TDTestCase:
retCode = taos_command(buildPath, "f", keyDict['f'], 'performance_schema', keyDict['c'], '', '', '')
print("============ ret code: ", retCode)
if retCode != "TAOS_OK":
tdLog.exit("taos -s fail")
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
tdSql.query("show databases")

View File

@@ -0,0 +1,282 @@
import taos
import sys
import time
import socket
import pexpect
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
taosCmd = buildPath + '/build/bin/taos '
if len(cfgDir) != 0:
taosCmd = taosCmd + '-c ' + cfgDir
taosCmd = taosCmd + ' -' + key
if len(value) != 0:
if key == 'p':
taosCmd = taosCmd + value
else:
taosCmd = taosCmd + ' ' + value
if len(key1) != 0:
taosCmd = taosCmd + ' -' + key1
if key1 == 'p':
taosCmd = taosCmd + value1
else:
if len(value1) != 0:
taosCmd = taosCmd + ' ' + value1
tdLog.info ("taos cmd: %s" % taosCmd)
child = pexpect.spawn(taosCmd, timeout=3)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6)
else:
i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
retResult = child.before.decode()
print("cmd return result:\n%s\n"%retResult)
#print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1)
retResult = child.before.decode()
if w == 0:
return "TAOS_OK", retResult
else:
return "TAOS_FAIL", retResult
else:
if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V':
return "TAOS_OK", retResult
else:
return "TAOS_OK", retResult
else:
if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V':
return "TAOS_OK", retResult
else:
return "TAOS_FAIL", retResult
class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
# 'serverPort': 7080, 'firstEp': 'trd02:7080'}
hostname = socket.gethostname()
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
clientCfgDict["serverPort"] = serverPort
clientCfgDict["firstEp"] = hostname + ':' + serverPort
clientCfgDict["secondEp"] = hostname + ':' + serverPort
clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
clientCfgDict["fqdn"] = hostname
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
updatecfgDict["clientCfg"] = clientCfgDict
updatecfgDict["serverPort"] = serverPort
updatecfgDict["firstEp"] = hostname + ':' + serverPort
updatecfgDict["secondEp"] = hostname + ':' + serverPort
clientCfgDict["fqdn"] = hostname
print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)
tdSql.query("create user testpy pass 'testpy'")
#hostname = socket.gethostname()
#tdLog.info ("hostname: %s" % hostname)
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting']
netrole = ['client', 'server']
keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \
'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''}
keyDict['h'] = self.hostname
keyDict['c'] = cfgPath
keyDict['P'] = self.serverPort
tdLog.printNoPrefix("================================ parameter: -h wiht error value")
#newDbName="dbh"
#sqlString = 'create database ' + newDbName + ';'
keyDict['h'] = 'abc'
retCode, retVal = taos_command(buildPath, "h", keyDict['h'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -h %s test suceess"%keyDict['h'])
else:
tdLog.exit("taos -h %s fail"%keyDict['h'])
keyDict['h'] = '\'abc\''
retCode, retVal = taos_command(buildPath, "h", keyDict['h'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -h %s test suceess"%keyDict['h'])
else:
tdLog.exit("taos -h %s fail"%keyDict['h'])
keyDict['h'] = '3'
retCode, retVal = taos_command(buildPath, "h", keyDict['h'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -h %s test suceess"%keyDict['h'])
else:
tdLog.exit("taos -h %s fail"%keyDict['h'])
keyDict['h'] = '\'3\''
retCode, retVal = taos_command(buildPath, "h", keyDict['h'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -h %s test suceess"%keyDict['h'])
else:
tdLog.exit("taos -h %s fail"%keyDict['h'])
tdLog.printNoPrefix("================================ parameter: -P wiht error value")
#newDbName="dbh"
#sqlString = 'create database ' + newDbName + ';'
keyDict['P'] = 'abc'
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Invalid port" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
keyDict['P'] = '\'abc\''
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Invalid port" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
keyDict['P'] = '3'
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
keyDict['P'] = '\'3\''
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
keyDict['P'] = '12ab'
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
keyDict['P'] = '\'12ab\''
retCode, retVal = taos_command(buildPath, "P", keyDict['P'], "taos>", keyDict['c'], '')
if (retCode == "TAOS_FAIL") and ("Unable to establish connection" in retVal):
tdLog.info("taos -P %s test suceess"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
tdLog.printNoPrefix("================================ parameter: -f with error sql ")
pwd=os.getcwd()
newDbName="dbf"
sqlFile = pwd + "/0-others/sql.txt"
sql1 = "echo 'create database " + newDbName + "' > " + sqlFile
sql2 = "echo 'use " + newDbName + "' >> " + sqlFile
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
sql5 = "echo 'show databases' >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
os.system(sql4)
os.system(sql5)
keyDict['f'] = pwd + "/0-others/sql.txt"
retCode, retVal = taos_command(buildPath, "f", keyDict['f'], 'performance_schema', keyDict['c'], '', '', '')
#print("============ ret code: ", retCode)
if retCode != "TAOS_OK":
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
tdSql.query("show databases")
for i in range(tdSql.queryRows):
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
if tdSql.getData(i, 0) == newDbName:
break
else:
tdLog.exit("create db fail after taos -f fail")
sqlString = "select * from " + newDbName + ".ntbf"
tdSql.error(sqlString)
shellCmd = "rm -f " + sqlFile
os.system(shellCmd)
keyDict['f'] = pwd + "/0-others/noexistfile.txt"
retCode, retVal = taos_command(buildPath, "f", keyDict['f'], 'failed to open file', keyDict['c'], '', '', '')
#print("============ ret code: ", retCode)
if retCode != "TAOS_OK":
tdLog.exit("taos -f fail")
tdSql.query('drop database %s'%newDbName)
tdLog.printNoPrefix("================================ parameter: -a with error value")
#newDbName="dba"
errorPassword = 'errorPassword'
sqlString = 'create database ' + newDbName + ';'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', errorPassword)
if retCode != "TAOS_FAIL":
tdLog.exit("taos -u %s -a %s"%(keyDict['u'], errorPassword))
tdLog.printNoPrefix("================================ parameter: -p with error value")
#newDbName="dba"
keyDict['p'] = 'errorPassword'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'p', keyDict['p'])
if retCode == "TAOS_FAIL" and "Authentication failure" in retVal:
tdLog.info("taos -p %s test suceess"%keyDict['p'])
else:
tdLog.exit("taos -u %s -p %s"%(keyDict['u'], keyDict['p']))
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -0,0 +1,202 @@
import taos
import sys
import time
import socket
import pexpect
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
taosCmd = buildPath + '/build/bin/taos '
if len(cfgDir) != 0:
taosCmd = taosCmd + '-c ' + cfgDir
taosCmd = taosCmd + ' -' + key
if len(value) != 0:
if key == 'p':
taosCmd = taosCmd + value
else:
taosCmd = taosCmd + ' ' + value
if len(key1) != 0:
taosCmd = taosCmd + ' -' + key1
if key1 == 'p':
taosCmd = taosCmd + value1
else:
if len(value1) != 0:
taosCmd = taosCmd + ' ' + value1
tdLog.info ("taos cmd: %s" % taosCmd)
child = pexpect.spawn(taosCmd, timeout=3)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
i = child.expect([expectString, pexpect.TIMEOUT, pexpect.EOF], timeout=6)
else:
i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
retResult = child.before.decode()
print("expect() return code: %d, content:\n %s\n"%(i, retResult))
#print(child.after.decode())
if i == 0:
print ('taos login success! Here can run sql, taos> ')
if len(sqlString) != 0:
child.sendline (sqlString)
w = child.expect(["Query OK", pexpect.TIMEOUT, pexpect.EOF], timeout=1)
retResult = child.before.decode()
if w == 0:
return "TAOS_OK", retResult
else:
return "TAOS_FAIL", retResult
else:
if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V':
return "TAOS_OK", retResult
else:
return "TAOS_OK", retResult
else:
if key == 'A' or key1 == 'A' or key == 'C' or key1 == 'C' or key == 'V' or key1 == 'V':
return "TAOS_OK", retResult
else:
return "TAOS_FAIL", retResult
class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
# 'serverPort': 7080, 'firstEp': 'trd02:7080'}
hostname = socket.gethostname()
serverPort = '7080'
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
clientCfgDict["serverPort"] = serverPort
clientCfgDict["firstEp"] = hostname + ':' + serverPort
clientCfgDict["secondEp"] = hostname + ':' + serverPort
clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
clientCfgDict["fqdn"] = hostname
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
updatecfgDict["clientCfg"] = clientCfgDict
updatecfgDict["serverPort"] = serverPort
updatecfgDict["firstEp"] = hostname + ':' + serverPort
updatecfgDict["secondEp"] = hostname + ':' + serverPort
updatecfgDict["fqdn"] = hostname
print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdSql.query("create user testpy pass 'testpy'")
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
checkNetworkStatus = ['0: unavailable', '1: network ok', '2: service ok', '3: service degraded', '4: exiting']
netrole = ['client', 'server']
keyDict = {'h':'', 'P':'6030', 'p':'testpy', 'u':'testpy', 'a':'', 'A':'', 'c':'', 'C':'', 's':'', 'r':'', 'f':'', \
'k':'', 't':'', 'n':'', 'l':'1024', 'N':'100', 'V':'', 'd':'db', 'w':'30', '-help':'', '-usage':'', '?':''}
keyDict['h'] = self.hostname
keyDict['c'] = cfgPath
keyDict['P'] = self.serverPort
tdLog.printNoPrefix("================================ parameter: -k")
sqlString = ''
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
tdLog.exit("taos -k fail")
# stop taosd
tdDnodes.stop(1)
#sleep(10)
#tdDnodes.start(1)
#sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "0: unavailable" in retVal:
tdLog.info("taos -k success")
else:
tdLog.exit("taos -k fail")
# restart taosd
tdDnodes.start(1)
#sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
tdLog.exit("taos -k fail")
tdLog.printNoPrefix("================================ parameter: -n")
# stop taosd
tdDnodes.stop(1)
role = 'server'
taosCmd = 'nohup ' + buildPath + '/build/bin/taos -c ' + keyDict['c']
taosCmd = taosCmd + ' -n ' + role + ' > /dev/null 2>&1 &'
print (taosCmd)
os.system(taosCmd)
pktLen = '2000'
pktNum = '10'
role = 'client'
taosCmd = buildPath + '/build/bin/taos -c ' + keyDict['c']
taosCmd = taosCmd + ' -n ' + role + ' -l ' + pktLen + ' -N ' + pktNum
print (taosCmd)
child = pexpect.spawn(taosCmd, timeout=3)
i = child.expect([pexpect.TIMEOUT, pexpect.EOF], timeout=6)
retResult = child.before.decode()
print("expect() return code: %d, content:\n %s\n"%(i, retResult))
#print(child.after.decode())
if i == 0:
tdLog.exit('taos -n server fail!')
expectString1 = 'response is received, size:' + pktLen
expectString2 = pktNum + '/' + pktNum
if expectString1 in retResult and expectString2 in retResult:
tdLog.info("taos -n client success")
else:
tdLog.exit('taos -n client fail!')
os.system('pkill taos')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -1,5 +1,6 @@
#!/bin/bash
set -e
set -x
python3 ./test.py -f 0-others/taosShell.py

View File

@@ -93,6 +93,7 @@ function runSimCaseOneByOnefq {
if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
#case=`echo $line | grep sim$ |awk '{print $NF}'`
case=`echo $line | grep -o ".*\.sim" |awk '{print $NF}'`
echo "$line running ..."
start_time=`date +%s`
date +%F\ %T | tee -a out.log