Merge pull request #10874 from taosdata/3.0_query_integrate

3.0 query integrate
This commit is contained in:
Haojun Liao 2022-03-22 19:18:06 +08:00 committed by GitHub
commit 32c8e1f12c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
102 changed files with 5792 additions and 4266 deletions

1
.gitignore vendored
View File

@ -88,6 +88,7 @@ tests/examples/JDBC/JDBCDemo/.classpath
tests/examples/JDBC/JDBCDemo/.project
tests/examples/JDBC/JDBCDemo/.settings/
source/libs/parser/inc/sql.*
tests/script/tmqResult.txt
# Emacs
# -*- mode: gitignore; -*-

View File

@ -213,7 +213,6 @@ endif(${BUILD_WITH_TRAFT})
# LIBUV
if(${BUILD_WITH_UV})
add_compile_options(-Wno-sign-compare)
if (${TD_WINDOWS})
file(READ "libuv/include/uv.h" CONTENTS)
string(REGEX REPLACE "/([\r]*)\nstruct uv_tcp_s {" "/\\1\ntypedef BOOL (PASCAL *LPFN_CONNECTEX) (SOCKET s, const struct sockaddr* name, int namelen, PVOID lpSendBuffer, DWORD dwSendDataLength,LPDWORD lpdwBytesSent, LPOVERLAPPED lpOverlapped);\\1\nstruct uv_tcp_s {" CONTENTS_NEW "${CONTENTS}")

View File

@ -33,9 +33,10 @@ typedef enum {
TSDB_SUPER_TABLE = 1, // super table
TSDB_CHILD_TABLE = 2, // table created from super table
TSDB_NORMAL_TABLE = 3, // ordinary table
TSDB_STREAM_TABLE = 4, // table created by stream processing
TSDB_STREAM_TABLE = 4, // table created from stream computing
TSDB_TEMP_TABLE = 5, // temp table created by nest query
TSDB_TABLE_MAX = 6
TSDB_SYSTEM_TABLE = 6,
TSDB_TABLE_MAX = 7
} ETableType;
typedef enum {

View File

@ -59,20 +59,10 @@ typedef struct SDataBlockInfo {
int32_t rowSize;
int16_t numOfCols;
int16_t hasVarCol;
union {
int64_t uid;
int64_t blockId;
};
union {int64_t uid; int64_t blockId;};
int64_t groupId; // no need to serialize
} SDataBlockInfo;
// typedef struct SConstantItem {
// SColumnInfo info;
// int32_t startRow; // run-length-encoding to save the space for multiple rows
// int32_t endRow;
// SVariant value;
// } SConstantItem;
// info.numOfCols = taosArrayGetSize(pDataBlock) + taosArrayGetSize(pConstantList);
typedef struct SSDataBlock {
SColumnDataAgg* pBlockAgg;
SArray* pDataBlock; // SArray<SColumnInfoData>
@ -183,10 +173,8 @@ typedef struct SColumn {
int64_t dataBlockId;
};
union {
int16_t colId;
int16_t slotId;
};
char name[TSDB_COL_NAME_LEN];
int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string)

View File

@ -102,8 +102,8 @@ static FORCE_INLINE bool colDataIsNull(const SColumnInfoData* pColumnInfoData, u
: ((p1_)->pData + ((r_) * (p1_)->info.bytes)))
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, const SColumnInfoData* pSource,
uint32_t numOfRow2);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, const SColumnInfoData* pSource, uint32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows);
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock);
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows);
@ -113,13 +113,12 @@ size_t blockDataGetNumOfCols(const SSDataBlock* pBlock);
size_t blockDataGetNumOfRows(const SSDataBlock* pBlock);
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc);
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex,
int32_t pageSize);
SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int32_t rowCount);
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex, int32_t pageSize);
int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock);
int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf);
SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int32_t rowCount);
size_t blockDataGetSize(const SSDataBlock* pBlock);
size_t blockDataGetRowSize(const SSDataBlock* pBlock);
double blockDataGetSerialRowSize(const SSDataBlock* pBlock);

View File

@ -24,6 +24,7 @@
#include "thash.h"
#include "tlist.h"
#include "trow.h"
#include "tname.h"
#include "tuuid.h"
#ifdef __cplusplus
@ -471,6 +472,11 @@ typedef struct {
int32_t code;
} SQueryTableRsp;
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp);
int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int32_t numOfVgroups;
@ -863,6 +869,7 @@ void tFreeSShowRsp(SShowRsp* pRsp);
typedef struct {
int32_t type;
char db[TSDB_DB_FNAME_LEN];
char tb[TSDB_TABLE_NAME_LEN];
int64_t showId;
int8_t free;
} SRetrieveTableReq;
@ -880,6 +887,17 @@ typedef struct {
char data[];
} SRetrieveTableRsp;
typedef struct {
int64_t handle;
int64_t useconds;
int8_t completed; // all results are returned to client
int8_t precision;
int8_t compressed;
int32_t compLen;
int32_t numOfRows;
char data[];
} SRetrieveMetaTableRsp;
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
int32_t port;
@ -1347,6 +1365,7 @@ typedef struct {
typedef struct SVCreateTbReq {
int64_t ver; // use a general definition
char* dbFName;
char* name;
uint32_t ttl;
uint32_t keep;
@ -1371,7 +1390,7 @@ typedef struct SVCreateTbReq {
} SVCreateTbReq, SVUpdateTbReq;
typedef struct {
int tmp; // TODO: to avoid compile error
int32_t code;
} SVCreateTbRsp, SVUpdateTbRsp;
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
@ -1382,13 +1401,17 @@ typedef struct {
SArray* pArray;
} SVCreateTbBatchReq;
typedef struct {
int tmp; // TODO: to avoid compile error
} SVCreateTbBatchRsp;
int32_t tSerializeSVCreateTbBatchReq(void** buf, SVCreateTbBatchReq* pReq);
void* tDeserializeSVCreateTbBatchReq(void* buf, SVCreateTbBatchReq* pReq);
typedef struct {
SArray* rspList; // SArray<SVCreateTbRsp>
} SVCreateTbBatchRsp;
int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp);
int32_t tDeserializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp);
typedef struct {
int64_t ver;
char* name;

View File

@ -17,7 +17,6 @@
#define _TD_COMMON_NAME_H_
#include "tdef.h"
#include "tmsg.h"
#ifdef __cplusplus
extern "C" {
@ -61,7 +60,8 @@ int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
int32_t tNameSetAcctId(SName* dst, int32_t acctId);
SSchema createSchema(uint8_t type, int32_t bytes, int32_t colId, const char* name);
bool tNameDBNameEqual(SName* left, SName* right);
#ifdef __cplusplus
}

View File

@ -51,130 +51,133 @@
#define TK_USER 33
#define TK_PRIVILEGE 34
#define TK_DROP 35
#define TK_SHOW 36
#define TK_DNODE 37
#define TK_PORT 38
#define TK_NK_INTEGER 39
#define TK_DNODES 40
#define TK_NK_IPTOKEN 41
#define TK_LOCAL 42
#define TK_QNODE 43
#define TK_ON 44
#define TK_QNODES 45
#define TK_DATABASE 46
#define TK_DATABASES 47
#define TK_USE 48
#define TK_IF 49
#define TK_NOT 50
#define TK_EXISTS 51
#define TK_BLOCKS 52
#define TK_CACHE 53
#define TK_CACHELAST 54
#define TK_COMP 55
#define TK_DAYS 56
#define TK_FSYNC 57
#define TK_MAXROWS 58
#define TK_MINROWS 59
#define TK_KEEP 60
#define TK_PRECISION 61
#define TK_QUORUM 62
#define TK_REPLICA 63
#define TK_TTL 64
#define TK_WAL 65
#define TK_VGROUPS 66
#define TK_SINGLE_STABLE 67
#define TK_STREAM_MODE 68
#define TK_RETENTIONS 69
#define TK_FILE_FACTOR 70
#define TK_NK_FLOAT 71
#define TK_TABLE 72
#define TK_NK_LP 73
#define TK_NK_RP 74
#define TK_STABLE 75
#define TK_TABLES 76
#define TK_STABLES 77
#define TK_ADD 78
#define TK_COLUMN 79
#define TK_MODIFY 80
#define TK_RENAME 81
#define TK_TAG 82
#define TK_SET 83
#define TK_NK_EQ 84
#define TK_USING 85
#define TK_TAGS 86
#define TK_NK_DOT 87
#define TK_NK_COMMA 88
#define TK_COMMENT 89
#define TK_BOOL 90
#define TK_TINYINT 91
#define TK_SMALLINT 92
#define TK_INT 93
#define TK_INTEGER 94
#define TK_BIGINT 95
#define TK_FLOAT 96
#define TK_DOUBLE 97
#define TK_BINARY 98
#define TK_TIMESTAMP 99
#define TK_NCHAR 100
#define TK_UNSIGNED 101
#define TK_JSON 102
#define TK_VARCHAR 103
#define TK_MEDIUMBLOB 104
#define TK_BLOB 105
#define TK_VARBINARY 106
#define TK_DECIMAL 107
#define TK_SMA 108
#define TK_ROLLUP 109
#define TK_INDEX 110
#define TK_FULLTEXT 111
#define TK_FUNCTION 112
#define TK_INTERVAL 113
#define TK_TOPIC 114
#define TK_AS 115
#define TK_MNODES 116
#define TK_NK_BOOL 117
#define TK_NK_VARIABLE 118
#define TK_BETWEEN 119
#define TK_IS 120
#define TK_NULL 121
#define TK_NK_LT 122
#define TK_NK_GT 123
#define TK_NK_LE 124
#define TK_NK_GE 125
#define TK_NK_NE 126
#define TK_LIKE 127
#define TK_MATCH 128
#define TK_NMATCH 129
#define TK_IN 130
#define TK_FROM 131
#define TK_JOIN 132
#define TK_INNER 133
#define TK_SELECT 134
#define TK_DISTINCT 135
#define TK_WHERE 136
#define TK_PARTITION 137
#define TK_BY 138
#define TK_SESSION 139
#define TK_STATE_WINDOW 140
#define TK_SLIDING 141
#define TK_FILL 142
#define TK_VALUE 143
#define TK_NONE 144
#define TK_PREV 145
#define TK_LINEAR 146
#define TK_NEXT 147
#define TK_GROUP 148
#define TK_HAVING 149
#define TK_ORDER 150
#define TK_SLIMIT 151
#define TK_SOFFSET 152
#define TK_LIMIT 153
#define TK_OFFSET 154
#define TK_ASC 155
#define TK_DESC 156
#define TK_NULLS 157
#define TK_FIRST 158
#define TK_LAST 159
#define TK_DNODE 36
#define TK_PORT 37
#define TK_NK_INTEGER 38
#define TK_DNODES 39
#define TK_NK_IPTOKEN 40
#define TK_LOCAL 41
#define TK_QNODE 42
#define TK_ON 43
#define TK_DATABASE 44
#define TK_USE 45
#define TK_IF 46
#define TK_NOT 47
#define TK_EXISTS 48
#define TK_BLOCKS 49
#define TK_CACHE 50
#define TK_CACHELAST 51
#define TK_COMP 52
#define TK_DAYS 53
#define TK_FSYNC 54
#define TK_MAXROWS 55
#define TK_MINROWS 56
#define TK_KEEP 57
#define TK_PRECISION 58
#define TK_QUORUM 59
#define TK_REPLICA 60
#define TK_TTL 61
#define TK_WAL 62
#define TK_VGROUPS 63
#define TK_SINGLE_STABLE 64
#define TK_STREAM_MODE 65
#define TK_RETENTIONS 66
#define TK_FILE_FACTOR 67
#define TK_NK_FLOAT 68
#define TK_TABLE 69
#define TK_NK_LP 70
#define TK_NK_RP 71
#define TK_STABLE 72
#define TK_ADD 73
#define TK_COLUMN 74
#define TK_MODIFY 75
#define TK_RENAME 76
#define TK_TAG 77
#define TK_SET 78
#define TK_NK_EQ 79
#define TK_USING 80
#define TK_TAGS 81
#define TK_NK_DOT 82
#define TK_NK_COMMA 83
#define TK_COMMENT 84
#define TK_BOOL 85
#define TK_TINYINT 86
#define TK_SMALLINT 87
#define TK_INT 88
#define TK_INTEGER 89
#define TK_BIGINT 90
#define TK_FLOAT 91
#define TK_DOUBLE 92
#define TK_BINARY 93
#define TK_TIMESTAMP 94
#define TK_NCHAR 95
#define TK_UNSIGNED 96
#define TK_JSON 97
#define TK_VARCHAR 98
#define TK_MEDIUMBLOB 99
#define TK_BLOB 100
#define TK_VARBINARY 101
#define TK_DECIMAL 102
#define TK_SMA 103
#define TK_ROLLUP 104
#define TK_SHOW 105
#define TK_DATABASES 106
#define TK_TABLES 107
#define TK_STABLES 108
#define TK_MNODES 109
#define TK_MODULES 110
#define TK_QNODES 111
#define TK_FUNCTIONS 112
#define TK_INDEXES 113
#define TK_FROM 114
#define TK_LIKE 115
#define TK_INDEX 116
#define TK_FULLTEXT 117
#define TK_FUNCTION 118
#define TK_INTERVAL 119
#define TK_TOPIC 120
#define TK_AS 121
#define TK_NK_BOOL 122
#define TK_NK_VARIABLE 123
#define TK_BETWEEN 124
#define TK_IS 125
#define TK_NULL 126
#define TK_NK_LT 127
#define TK_NK_GT 128
#define TK_NK_LE 129
#define TK_NK_GE 130
#define TK_NK_NE 131
#define TK_MATCH 132
#define TK_NMATCH 133
#define TK_IN 134
#define TK_JOIN 135
#define TK_INNER 136
#define TK_SELECT 137
#define TK_DISTINCT 138
#define TK_WHERE 139
#define TK_PARTITION 140
#define TK_BY 141
#define TK_SESSION 142
#define TK_STATE_WINDOW 143
#define TK_SLIDING 144
#define TK_FILL 145
#define TK_VALUE 146
#define TK_NONE 147
#define TK_PREV 148
#define TK_LINEAR 149
#define TK_NEXT 150
#define TK_GROUP 151
#define TK_HAVING 152
#define TK_ORDER 153
#define TK_SLIMIT 154
#define TK_SOFFSET 155
#define TK_LIMIT 156
#define TK_OFFSET 157
#define TK_ASC 158
#define TK_DESC 159
#define TK_NULLS 160
#define TK_FIRST 161
#define TK_LAST 162
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301

View File

@ -103,16 +103,17 @@ int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* vers
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pDBName (input, full db name)
* @param forceUpdate (input, force update db vgroup info from mnode)
* @param pVgroupList (output, vgroup info list, element is SVgroupInfo, NEED to simply free the array by caller)
* @return error code
*/
int32_t catalogGetDBVgInfo(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, const char* pDBName, bool forceUpdate, SArray** pVgroupList);
int32_t catalogGetDBVgInfo(SCatalog* pCatalog, void *pTransporter, const SEpSet* pMgmtEps, const char* pDBName, SArray** pVgroupList);
int32_t catalogUpdateDBVgInfo(SCatalog* pCatalog, const char* dbName, uint64_t dbId, SDBVgInfo* dbInfo);
int32_t catalogRemoveDB(SCatalog* pCatalog, const char* dbName, uint64_t dbId);
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName);
int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid);
/**
@ -120,7 +121,7 @@ int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId,
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param pTableMeta(output, table meta data, NEED to free it by calller)
* @return error code
*/
@ -131,7 +132,7 @@ int32_t catalogGetTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpSe
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param pTableMeta(output, table meta data, NEED to free it by calller)
* @return error code
*/
@ -140,12 +141,22 @@ int32_t catalogGetSTableMeta(SCatalog* pCatalog, void * pTransporter, const SEpS
int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
/**
* Force refresh DB's local cached vgroup info.
* @param pCtg (input, got with catalogGetHandle)
* @param pTrans (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param dbFName (input, db full name)
* @return error code
*/
int32_t catalogRefreshDBVgInfo(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, const char* dbFName);
/**
* Force refresh a table's local cached meta data.
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param isSTable (input, is super table or not, 1:supposed to be stable, 0: supposed not to be stable, -1:not sure)
* @return error code
*/
@ -156,7 +167,7 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param pTableMeta(output, table meta data, NEED to free it by calller)
* @param isSTable (input, is super table or not, 1:supposed to be stable, 0: supposed not to be stable, -1:not sure)
* @return error code
@ -170,7 +181,7 @@ int32_t catalogUpdateSTableMeta(SCatalog* pCatalog, STableMetaRsp *rspMsg);
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param pVgroupList (output, vgroup info list, element is SVgroupInfo, NEED to simply free the array by caller)
* @return error code
*/
@ -181,7 +192,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCatalog, void *pTransporter, const
* @param pCatalog (input, got with catalogGetHandle)
* @param pTransporter (input, rpc object)
* @param pMgmtEps (input, mnode EPs)
* @param pTableName (input, table name, NOT including db name)
* @param pTableName (input, table name)
* @param vgInfo (output, vgroup info)
* @return error code
*/

View File

@ -21,6 +21,7 @@ extern "C" {
#endif
#include "tcommon.h"
#include "query.h"
typedef void* qTaskInfo_t;
typedef void* DataSinkHandle;
@ -30,6 +31,7 @@ struct SSubplan;
typedef struct SReadHandle {
void* reader;
void* meta;
void* config;
} SReadHandle;
#define STREAM_DATA_TYPE_SUBMIT_BLOCK 0x1

View File

@ -163,7 +163,7 @@ typedef struct SInputColumnInfoData {
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
uint32_t order; // asc|desc
uint32_t order; // data block scanner order: asc|desc
////////////////////////////////////////////////////////////////
int32_t startRow; // start row index
int32_t size; // handled processed row number

View File

@ -179,7 +179,8 @@ typedef struct SAlterDnodeStmt {
typedef struct SShowStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
SNode* pDbName; // SValueNode
SNode* pTbNamePattern; // SValueNode
} SShowStmt;
typedef enum EIndexType {

View File

@ -78,7 +78,6 @@ typedef enum ENodeType {
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_SHOW_DATABASES_STMT, // temp
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLE_STMT,
@ -86,20 +85,13 @@ typedef enum ENodeType {
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_SHOW_TABLES_STMT, // temp
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
@ -107,6 +99,18 @@ typedef enum ENodeType {
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN,

View File

@ -35,8 +35,7 @@ typedef struct SLogicNode {
typedef enum EScanType {
SCAN_TYPE_TAG,
SCAN_TYPE_TABLE,
SCAN_TYPE_STABLE,
SCAN_TYPE_TOPIC,
SCAN_TYPE_SYSTEM_TABLE,
SCAN_TYPE_STREAM
} EScanType;
@ -165,10 +164,14 @@ typedef struct SScanPhysiNode {
SName tableName;
} SScanPhysiNode;
typedef SScanPhysiNode SSystemTableScanPhysiNode;
typedef SScanPhysiNode STagScanPhysiNode;
typedef SScanPhysiNode SStreamScanPhysiNode;
typedef struct SSystemTableScanPhysiNode {
SScanPhysiNode scan;
SEpSet mgmtEpSet;
} SSystemTableScanPhysiNode;
typedef struct STableScanPhysiNode {
SScanPhysiNode scan;
uint8_t scanFlag; // denotes reversed scan of data or not
@ -244,6 +247,7 @@ typedef struct SSubplan {
ESubplanType subplanType;
int32_t msgType; // message type for subplan, used to denote the send message type to vnode.
int32_t level; // the execution level of current subplan, starting from 0 in a top-down manner.
char dbFName[TSDB_DB_FNAME_LEN];
SQueryNodeAddr execNode; // for the scan/modify subplan, the optional execution node
SQueryNodeStat execNodeStat; // only for scan subplan
SNodeList* pChildren; // the datasource subplan,from which to fetch the result

View File

@ -130,6 +130,7 @@ typedef struct SRealTableNode {
STableNode table; // QUERY_NODE_REAL_TABLE
struct STableMeta* pMeta;
SVgroupsInfo* pVgroupList;
char useDbName[TSDB_DB_NAME_LEN];
} SRealTableNode;
typedef struct STempTableNode {

View File

@ -52,7 +52,8 @@ typedef struct SQuery {
SSchema* pResSchema;
SCmdMsgInfo* pCmdMsg;
int32_t msgType;
bool streamQuery;
SArray* pDbList;
SArray* pTableList;
} SQuery;
int32_t qParseQuerySql(SParseContext* pCxt, SQuery** pQuery);

View File

@ -25,6 +25,7 @@ extern "C" {
typedef struct SPlanContext {
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;

View File

@ -169,6 +169,9 @@ const SSchema* tGetTbnameColumnSchema();
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
char *jobTaskStatusStr(int32_t status);
SSchema createSchema(uint8_t type, int32_t bytes, int32_t colId, const char* name);
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen);
extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t msgSize);
@ -178,6 +181,15 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define SET_META_TYPE_TABLE(t) (t) = META_TYPE_TABLE
#define SET_META_TYPE_BOTH_TABLE(t) (t) = META_TYPE_BOTH_TABLE
#define NEED_CLIENT_RM_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID || (_code) == TSDB_CODE_VND_TB_NOT_EXIST)
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) ((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
#define NEED_CLIENT_HANDLE_ERROR(_code) (NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
#define NEED_SCHEDULER_RETRY_ERROR(_code) ((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define REQUEST_MAX_TRY_TIMES 5
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \

View File

@ -309,6 +309,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513)
#define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514)
#define TSDB_CODE_VND_TB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0515)
#define TSDB_CODE_VND_HASH_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x0516)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
@ -334,8 +335,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614)
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615)
#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616)
#define TSDB_CODE_TDB_NO_SMA_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x0617)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0618)
#define TSDB_CODE_TDB_TABLE_RECREATED TAOS_DEF_ERROR_CODE(0, 0x0617)
#define TSDB_CODE_TDB_NO_SMA_INDEX_IN_META TAOS_DEF_ERROR_CODE(0, 0x0618)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0619)
// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700)
@ -437,10 +439,12 @@ int32_t* taosGetErrno();
#define TSDB_CODE_CTG_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x2404)
#define TSDB_CODE_CTG_DB_DROPPED TAOS_DEF_ERROR_CODE(0, 0x2405)
#define TSDB_CODE_CTG_OUT_OF_SERVICE TAOS_DEF_ERROR_CODE(0, 0x2406)
#define TSDB_CODE_CTG_VG_META_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x2407)
//scheduler
//scheduler&qworker
#define TSDB_CODE_SCH_STATUS_ERROR TAOS_DEF_ERROR_CODE(0, 0x2501)
#define TSDB_CODE_SCH_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2502)
#define TSDB_CODE_QW_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x2503)
//parser
#define TSDB_CODE_PAR_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x2600)

View File

@ -99,7 +99,7 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_INS_TABLE_MNODES "mnodes"
#define TSDB_INS_TABLE_MODULES "modules"
#define TSDB_INS_TABLE_QNODES "qnodes"
#define TSDB_INS_TABLE_USER_DATABASE "user_database"
#define TSDB_INS_TABLE_USER_DATABASES "user_databases"
#define TSDB_INS_TABLE_USER_FUNCTIONS "user_functions"
#define TSDB_INS_TABLE_USER_INDEXES "user_indexes"
#define TSDB_INS_TABLE_USER_STABLES "user_stables"

View File

@ -86,7 +86,7 @@ int32_t taosHashGetSize(const SHashObj *pHashObj);
* @param size
* @return
*/
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size);
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t size);
/**
* return the payload data with the specified key

View File

@ -37,9 +37,8 @@ extern "C" {
#define CHECK_CODE_GOTO(expr, label) \
do { \
int32_t code = expr; \
code = expr; \
if (TSDB_CODE_SUCCESS != code) { \
terrno = code; \
goto label; \
} \
} while (0)
@ -186,6 +185,8 @@ typedef struct SRequestObj {
char* msgBuf;
void* pInfo; // sql parse info, generated by parser module
int32_t code;
SArray* dbList;
SArray* tableList;
SQueryExecMetric metric;
SRequestSendRecvBody body;
} SRequestObj;

View File

@ -159,9 +159,13 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery) {
}
code = qParseQuerySql(&cxt, pQuery);
if (TSDB_CODE_SUCCESS == code && ((*pQuery)->haveResultSet)) {
if (TSDB_CODE_SUCCESS == code) {
if ((*pQuery)->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, (*pQuery)->pResSchema, (*pQuery)->numOfResCols);
}
TSWAP(pRequest->dbList, (*pQuery)->pDbList, SArray*);
TSWAP(pRequest->tableList, (*pQuery)->pTableList, SArray*);
}
return code;
}
@ -191,8 +195,17 @@ int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArray* pNodeList) {
pRequest->type = pQuery->msgType;
SPlanContext cxt = { .queryId = pRequest->requestId, .pAstRoot = pQuery->pRoot, .acctId = pRequest->pTscObj->acctId };
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
SPlanContext cxt = {
.queryId = pRequest->requestId,
.acctId = pRequest->pTscObj->acctId,
.mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp),
.pAstRoot = pQuery->pRoot
};
int32_t code = qCreateQueryPlan(&cxt, pPlan, pNodeList);
if (code != 0) {
return code;
}
return code;
}
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) {
@ -219,6 +232,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
pRequest->code = code;
terrno = code;
return pRequest->code;
}
@ -231,9 +245,96 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
}
pRequest->code = res.code;
terrno = res.code;
return pRequest->code;
}
SRequestObj* execQueryImpl(STscObj* pTscObj, const char* sql, int sqlLen) {
SRequestObj* pRequest = NULL;
SQuery* pQuery = NULL;
int32_t code = 0;
SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
CHECK_CODE_GOTO(buildRequest(pTscObj, sql, sqlLen, &pRequest), _return);
CHECK_CODE_GOTO(parseSql(pRequest, false, &pQuery), _return);
if (pQuery->directRpc) {
CHECK_CODE_GOTO(execDdlQuery(pRequest, pQuery), _return);
} else {
CHECK_CODE_GOTO(getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList), _return);
CHECK_CODE_GOTO(scheduleQuery(pRequest, pRequest->body.pDag, pNodeList), _return);
}
_return:
taosArrayDestroy(pNodeList);
qDestroyQuery(pQuery);
if (NULL != pRequest && TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
}
return pRequest;
}
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) {
SCatalog *pCatalog = NULL;
int32_t code = 0;
int32_t dbNum = taosArrayGetSize(pRequest->dbList);
int32_t tblNum = taosArrayGetSize(pRequest->tableList);
if (dbNum <= 0 && tblNum <= 0) {
return TSDB_CODE_QRY_APP_ERROR;
}
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
SEpSet epset = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
for (int32_t i = 0; i < dbNum; ++i) {
char *dbFName = taosArrayGet(pRequest->dbList, i);
code = catalogRefreshDBVgInfo(pCatalog, pTscObj->pAppInfo->pTransporter, &epset, dbFName);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
for (int32_t i = 0; i < tblNum; ++i) {
SName *tableName = taosArrayGet(pRequest->tableList, i);
code = catalogRefreshTableMeta(pCatalog, pTscObj->pAppInfo->pTransporter, &epset, tableName, -1);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
return code;
}
SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
SRequestObj* pRequest = NULL;
int32_t retryNum = 0;
int32_t code = 0;
while (retryNum++ < REQUEST_MAX_TRY_TIMES) {
pRequest = execQueryImpl(pTscObj, sql, sqlLen);
if (TSDB_CODE_SUCCESS == pRequest->code || !NEED_CLIENT_HANDLE_ERROR(pRequest->code)) {
break;
}
code = refreshMeta(pTscObj, pRequest);
if (code) {
pRequest->code = code;
break;
}
}
return pRequest;
}
TAOS_RES* taos_query_l(TAOS* taos, const char* sql, int sqlLen) {
STscObj* pTscObj = (STscObj*)taos;
if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) {
@ -242,30 +343,7 @@ TAOS_RES* taos_query_l(TAOS* taos, const char* sql, int sqlLen) {
return NULL;
}
SRequestObj* pRequest = NULL;
SQuery* pQuery = NULL;
SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
terrno = TSDB_CODE_SUCCESS;
CHECK_CODE_GOTO(buildRequest(pTscObj, sql, sqlLen, &pRequest), _return);
CHECK_CODE_GOTO(parseSql(pRequest, false, &pQuery), _return);
if (pQuery->directRpc) {
CHECK_CODE_GOTO(execDdlQuery(pRequest, pQuery), _return);
} else {
CHECK_CODE_GOTO(getPlan(pRequest, pQuery, &pRequest->body.pDag, pNodeList), _return);
CHECK_CODE_GOTO(scheduleQuery(pRequest, pRequest->body.pDag, pNodeList), _return);
pRequest->code = terrno;
}
_return:
taosArrayDestroy(pNodeList);
qDestroyQuery(pQuery);
if (NULL != pRequest && TSDB_CODE_SUCCESS != terrno) {
pRequest->code = terrno;
}
return pRequest;
return execQuery(pTscObj, sql, sqlLen);
}
int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet) {
@ -384,7 +462,7 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
tfree(pMsgBody);
}
bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) {
return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP;
return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP || msgType == TDMT_VND_QUERY_HEARTBEAT_RSP;
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->ahandle;
@ -395,7 +473,6 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
assert(pRequest->self == pSendInfo->requestObjRefId);
pRequest->metric.rsp = taosGetTimestampMs();
pRequest->code = pMsg->code;
STscObj* pTscObj = pRequest->pTscObj;
if (pEpSet) {

View File

@ -238,7 +238,11 @@ int32_t processCreateDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
// todo rsp with the vnode id list
SRequestObj* pRequest = param;
free(pMsg->pData);
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
}
tsem_post(&pRequest->body.rspSem);
return code;
}
int32_t processUseDbRsp(void* param, const SDataBuf* pMsg, int32_t code) {

View File

@ -485,6 +485,7 @@ TAOS_RES* tmq_create_stream(TAOS* taos, const char* streamName, const char* tbNa
tscDebug("start to create stream: %s", streamName);
int32_t code = 0;
CHECK_CODE_GOTO(buildRequest(pTscObj, sql, sqlLen, &pRequest), _return);
CHECK_CODE_GOTO(parseSql(pRequest, false, &pQueryNode), _return);
@ -571,6 +572,7 @@ TAOS_RES* tmq_create_topic(TAOS* taos, const char* topicName, const char* sql, i
tscDebug("start to create topic: %s", topicName);
int32_t code = TSDB_CODE_SUCCESS;
CHECK_CODE_GOTO(buildRequest(pTscObj, sql, sqlLen, &pRequest), _return);
CHECK_CODE_GOTO(parseSql(pRequest, true, &pQueryNode), _return);

View File

@ -271,6 +271,8 @@ TEST(testCase, create_stable_Test) {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "use abc1");
pRes = taos_query(pConn, "create table if not exists abc1.st1(ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("error in create stable, reason:%s\n", taos_errstr(pRes));

View File

@ -260,6 +260,56 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, co
return numOfRow1 + numOfRow2;
}
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows) {
ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type);
if (numOfRows == 0) {
return numOfRows;
}
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
// Handle the bitmap
char* p = realloc(pColumnInfoData->varmeta.offset, sizeof(int32_t) * numOfRows);
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pColumnInfoData->varmeta.offset = (int32_t*) p;
memcpy(pColumnInfoData->varmeta.offset, pSource->varmeta.offset, sizeof(int32_t) * numOfRows);
if (pColumnInfoData->varmeta.allocLen < pSource->varmeta.length) {
char* tmp = realloc(pColumnInfoData->pData, pSource->varmeta.length);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pColumnInfoData->pData = tmp;
pColumnInfoData->varmeta.allocLen = pSource->varmeta.length;
}
memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length);
pColumnInfoData->varmeta.length = pSource->varmeta.length;
} else {
char* tmp = realloc(pColumnInfoData->nullbitmap, BitmapLen(numOfRows));
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pColumnInfoData->nullbitmap = tmp;
memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
int32_t newSize = numOfRows * pColumnInfoData->info.bytes;
tmp = realloc(pColumnInfoData->pData, newSize);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pColumnInfoData->pData = tmp;
memcpy(pColumnInfoData->pData, pSource->pData, pSource->info.bytes * numOfRows);
}
return 0;
}
size_t blockDataGetNumOfCols(const SSDataBlock* pBlock) {
ASSERT(pBlock && pBlock->info.numOfCols == taosArrayGetSize(pBlock->pDataBlock));
return pBlock->info.numOfCols;

View File

@ -1733,7 +1733,10 @@ int32_t tSerializeSRetrieveTableReq(void *buf, int32_t bufLen, SRetrieveTableReq
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI64(&encoder, pReq->showId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->type) < 0) return -1;
if (tEncodeI8(&encoder, pReq->free) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->tb) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1747,8 +1750,10 @@ int32_t tDeserializeSRetrieveTableReq(void *buf, int32_t bufLen, SRetrieveTableR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->showId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->type) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->free) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->tb) < 0) return -1;
tEndDecode(&decoder);
tCoderClear(&decoder);
return 0;
@ -2619,6 +2624,78 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *
void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); }
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->code) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tCoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
SCoder decoder = {0};
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
tEndDecode(&decoder);
tCoderClear(&decoder);
return 0;
}
int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
SCoder encoder = {0};
tCoderInit(&encoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_ENCODER);
if (tStartEncode(&encoder) < 0) return -1;
if (pRsp->rspList) {
int32_t num = taosArrayGetSize(pRsp->rspList);
if (tEncodeI32(&encoder, num) < 0) return -1;
for (int32_t i = 0; i < num; ++i) {
SVCreateTbRsp *rsp = taosArrayGet(pRsp->rspList, i);
if (tEncodeI32(&encoder, rsp->code) < 0) return -1;
}
} else {
if (tEncodeI32(&encoder, 0) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tCoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
SCoder decoder = {0};
int32_t num = 0;
tCoderInit(&decoder, TD_LITTLE_ENDIAN, buf, bufLen, TD_DECODER);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &num) < 0) return -1;
if (num > 0) {
pRsp->rspList = taosArrayInit(num, sizeof(SVCreateTbRsp));
if (NULL == pRsp->rspList) return -1;
for (int32_t i = 0; i < num; ++i) {
SVCreateTbRsp rsp = {0};
if (tDecodeI32(&decoder, &rsp.code) < 0) return -1;
if (NULL == taosArrayPush(pRsp->rspList, &rsp)) return -1;
}
} else {
pRsp->rspList = NULL;
}
tEndDecode(&decoder);
tCoderClear(&decoder);
return 0;
}
int32_t tSerializeSVCreateTSmaReq(void **buf, SVCreateTSmaReq *pReq) {
int32_t tlen = 0;

View File

@ -222,6 +222,27 @@ int32_t tNameSetAcctId(SName* dst, int32_t acctId) {
return 0;
}
bool tNameDBNameEqual(SName* left, SName* right) {
if (NULL == left) {
if (NULL == right) {
return true;
}
return false;
}
if (NULL == right) {
return false;
}
if (left->acctId != right->acctId) {
return false;
}
return (0 == strcmp(left->dbname, right->dbname));
}
int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
assert(dst != NULL && str != NULL && strlen(str) > 0);
@ -273,13 +294,4 @@ int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
return 0;
}
SSchema createSchema(uint8_t type, int32_t bytes, int32_t colId, const char* name) {
SSchema s = {0};
s.type = type;
s.bytes = bytes;
s.colId = colId;
tstrncpy(s.name, name, tListLen(s.name));
return s;
}

View File

@ -130,6 +130,7 @@ void mmInitMsgHandles(SMgmtWrapper *pWrapper) {
dndSetMsgHandle(pWrapper, TDMT_MND_HEARTBEAT, (NodeMsgFp)mmProcessWriteMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_SHOW, (NodeMsgFp)mmProcessReadMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_SHOW_RETRIEVE, (NodeMsgFp)mmProcessReadMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_SYSTABLE_RETRIEVE, (NodeMsgFp)mmProcessReadMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_STATUS, (NodeMsgFp)mmProcessReadMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_KILL_TRANS, (NodeMsgFp)mmProcessWriteMsg, 0);
dndSetMsgHandle(pWrapper, TDMT_MND_GRANT, (NodeMsgFp)mmProcessWriteMsg, 0);

View File

@ -930,12 +930,12 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (vindex < pDb->cfg.numOfVgroups) {
while (true) {
SVgObj *pVgroup = NULL;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if (pVgroup->dbUid == pDb->uid) {
if (NULL == pDb || pVgroup->dbUid == pDb->uid) {
SVgroupInfo vgInfo = {0};
vgInfo.vgId = pVgroup->vgId;
vgInfo.hashBegin = pVgroup->hashBegin;
@ -960,6 +960,10 @@ static void mndBuildDBVgroupInfo(SDbObj *pDb, SMnode *pMnode, SArray *pVgList) {
}
sdbRelease(pSdb, pVgroup);
if (pDb && (vindex >= pDb->cfg.numOfVgroups)) {
break;
}
}
sdbCancelFetch(pSdb, pIter);
@ -981,7 +985,25 @@ static int32_t mndProcessUseDbReq(SNodeMsg *pReq) {
char *p = strchr(usedbReq.db, '.');
if (p && 0 == strcmp(p + 1, TSDB_INFORMATION_SCHEMA_DB)) {
memcpy(usedbRsp.db, usedbReq.db, TSDB_DB_FNAME_LEN);
static int32_t vgVersion = 1;
if (usedbReq.vgVersion < vgVersion) {
usedbRsp.pVgroupInfos = taosArrayInit(10, sizeof(SVgroupInfo));
if (usedbRsp.pVgroupInfos == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto USE_DB_OVER;
}
mndBuildDBVgroupInfo(NULL, pMnode, usedbRsp.pVgroupInfos);
usedbRsp.vgVersion = vgVersion++;
if (taosArrayGetSize(usedbRsp.pVgroupInfos) <= 0) {
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
}
} else {
usedbRsp.vgVersion = usedbReq.vgVersion;
code = 0;
}
usedbRsp.vgNum = taosArrayGetSize(usedbRsp.pVgroupInfos);
} else {
pDb = mndAcquireDb(pMnode, usedbReq.db);
if (pDb == NULL) {
@ -1341,21 +1363,14 @@ char *mnGetDbStr(char *src) {
return pos;
}
static int32_t mndRetrieveDbs(SNodeMsg *pReq, SShowObj *pShow, char *data, int32_t rows) {
SMnode *pMnode = pReq->pNode;
SSdb *pSdb = pMnode->pSdb;
int32_t numOfRows = 0;
SDbObj *pDb = NULL;
char *pWrite;
static char* getDataPosition(char* pData, SShowObj* pShow, int32_t cols, int32_t rows, int32_t capacityOfRow) {
return pData + pShow->offset[cols] * capacityOfRow + pShow->bytes[cols] * rows;
}
static void dumpDbInfoToPayload(char* data, SDbObj* pDb, SShowObj* pShow, int32_t rows, int32_t rowCapacity, int64_t numOfTables) {
int32_t cols = 0;
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_DB, pShow->pIter, (void **)&pDb);
if (pShow->pIter == NULL) break;
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
char *name = mnGetDbStr(pDb->name);
if (name != NULL) {
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, pShow->bytes[cols]);
@ -1364,31 +1379,31 @@ static int32_t mndRetrieveDbs(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
}
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int64_t *)pWrite = pDb->createdTime;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int16_t *)pWrite = pDb->cfg.numOfVgroups;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *)pWrite = 0; // todo
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int64_t *)pWrite = numOfTables;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int16_t *)pWrite = pDb->cfg.replications;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int16_t *)pWrite = pDb->cfg.quorum;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int16_t *)pWrite = pDb->cfg.daysPerFile;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
char tmp[128] = {0};
if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) {
sprintf(tmp, "%d,%d,%d", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2, pDb->cfg.daysToKeep0);
@ -1398,39 +1413,39 @@ static int32_t mndRetrieveDbs(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
STR_WITH_SIZE_TO_VARSTR(pWrite, tmp, strlen(tmp));
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int32_t *)pWrite = pDb->cfg.cacheBlockSize;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int32_t *)pWrite = pDb->cfg.totalBlocks;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int32_t *)pWrite = pDb->cfg.minRows;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int32_t *)pWrite = pDb->cfg.maxRows;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int8_t *)pWrite = pDb->cfg.walLevel;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int32_t *)pWrite = pDb->cfg.fsyncPeriod;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int8_t *)pWrite = pDb->cfg.compression;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int8_t *)pWrite = pDb->cfg.cacheLastRow;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
char *prec = NULL;
switch (pDb->cfg.precision) {
case TSDB_TIME_PRECISION_MILLI:
@ -1449,15 +1464,48 @@ static int32_t mndRetrieveDbs(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
STR_WITH_SIZE_TO_VARSTR(pWrite, prec, 2);
cols++;
// pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
// pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
// *(int8_t *)pWrite = pDb->cfg.update;
// cols++;
}
static void setInformationSchemaDbCfg(SDbObj* pDbObj) {
ASSERT(pDbObj != NULL);
strncpy(pDbObj->name, TSDB_INFORMATION_SCHEMA_DB, tListLen(pDbObj->name));
pDbObj->createdTime = 0;
pDbObj->cfg.numOfVgroups = 0;
pDbObj->cfg.quorum = 1;
pDbObj->cfg.replications = 1;
pDbObj->cfg.update = 1;
pDbObj->cfg.precision = TSDB_TIME_PRECISION_MILLI;
}
static int32_t mndRetrieveDbs(SNodeMsg *pReq, SShowObj *pShow, char *data, int32_t rowsCapacity) {
SMnode *pMnode = pReq->pNode;
SSdb *pSdb = pMnode->pSdb;
int32_t numOfRows = 0;
SDbObj *pDb = NULL;
while (numOfRows < rowsCapacity) {
pShow->pIter = sdbFetch(pSdb, SDB_DB, pShow->pIter, (void **)&pDb);
if (pShow->pIter == NULL) {
break;
}
dumpDbInfoToPayload(data, pDb, pShow, numOfRows, rowsCapacity, 0);
numOfRows++;
sdbRelease(pSdb, pDb);
}
mndVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
// Append the information_schema database into the result.
if (numOfRows < rowsCapacity) {
SDbObj dummyISDb = {0};
setInformationSchemaDbCfg(&dummyISDb);
dumpDbInfoToPayload(data, &dummyISDb, pShow, numOfRows, rowsCapacity, 14);
numOfRows += 1;
}
mndVacuumResult(data, pShow->numOfColumns, numOfRows, rowsCapacity, pShow);
pShow->numOfReads += numOfRows;
return numOfRows;

View File

@ -752,7 +752,7 @@ static int32_t mndGetDnodeMeta(SNodeMsg *pReq, SShowObj *pShow, STableMetaRsp *p
pSchema[cols].bytes = pShow->bytes[cols];
cols++;
pShow->bytes[cols] = 24 + VARSTR_HEADER_SIZE;
pShow->bytes[cols] = 256 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "offline_reason");
pSchema[cols].bytes = pShow->bytes[cols];

View File

@ -16,55 +16,54 @@
#define _DEFAULT_SOURCE
#include "mndInfoSchema.h"
static const SInfosTableSchema dnodesSchema[] = {{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vnodes", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "cores", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "role", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
static const SInfosTableSchema dnodesSchema[] = {{.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "max_vnodes",.bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "offline_reason", .bytes = 256, .type = TSDB_DATA_TYPE_BINARY},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema mnodesSchema[] = {{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "role", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema modulesSchema[] = {{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "endpoint", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "module", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema qnodesSchema[] = {{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "end_point", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "endpoint", .bytes = 134, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
static const SInfosTableSchema userDBSchema[] = {{.name = "name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "ntables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "replica", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "quorum", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "days", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "keep", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
static const SInfosTableSchema userDBSchema[] = {{.name = "name", .bytes = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "replica", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "quorum", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "days", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "keep", .bytes = 24 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "cache", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wallevel", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wallevel", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "fsync", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "comp", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "cachelast", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "precision", .bytes = 2, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "cachelast", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "precision", .bytes = 3 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
// {.name = "update", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, // disable update
};
static const SInfosTableSchema userFuncSchema[] = {{.name = "name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "ntables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "precision", .bytes = 2, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema userIdxSchema[] = {{.name = "table_database", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
static const SInfosTableSchema userIdxSchema[] = {{.name = "db_name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "table_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "index_database", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "index_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
@ -72,26 +71,31 @@ static const SInfosTableSchema userIdxSchema[] = {{.name = "table_database", .
{.name = "index_type", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "index_extensions", .bytes = 256, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema userStbsSchema[] = {{.name = "db_name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "stable_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
static const SInfosTableSchema userStbsSchema[] = {{.name = "db_name", .bytes = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "stable_name", .bytes = (TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "table_comment", .bytes = 1024 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema userStreamsSchema[] = {{.name = "stream_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_BINARY},
{.name = "dest_table", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema userTblsSchema[] = {{.name = "db_name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
static const SInfosTableSchema userTblsSchema[] = {
{.name = "table_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "created_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "db_name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "stable_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
{.name = "tid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "vg_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "table_comment", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema userTblDistSchema[] = {{.name = "db_name", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY},
{.name = "table_name", .bytes = 192, .type = TSDB_DATA_TYPE_BINARY},
@ -107,13 +111,15 @@ static const SInfosTableSchema userTblDistSchema[] = {{.name = "db_name",
{.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableSchema userUsersSchema[] = {{.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_BINARY},
{.name = "privilege", .bytes = 256, .type = TSDB_DATA_TYPE_BINARY},
static const SInfosTableSchema userUsersSchema[] = {{.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "privilege", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "account", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
};
static const SInfosTableSchema vgroupsSchema[] = {{.name = "vg_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
static const SInfosTableSchema vgroupsSchema[] = {{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "db_name", .bytes = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "onlines", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v1_status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
@ -122,13 +128,15 @@ static const SInfosTableSchema vgroupsSchema[] = {{.name = "vg_id", .
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "v3_status", .bytes = 10, .type = TSDB_DATA_TYPE_BINARY},
{.name = "compacting", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
static const SInfosTableMeta infosMeta[] = {{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
{TSDB_INS_TABLE_USER_DATABASE, userDBSchema, tListLen(userDBSchema)},
{TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
{TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
{TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
@ -139,7 +147,7 @@ static const SInfosTableMeta infosMeta[] = {{TSDB_INS_TABLE_DNODES, dnodesSchema
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
};
//connection/application/
int32_t mndInitInfosTableSchema(const SInfosTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
SSchema *schema = calloc(colNum, sizeof(SSchema));
if (NULL == schema) {
@ -147,7 +155,6 @@ int32_t mndInitInfosTableSchema(const SInfosTableSchema *pSrc, int32_t colNum, S
return -1;
}
for (int32_t i = 0; i < colNum; ++i) {
strcpy(schema[i].name, pSrc[i].name);
@ -157,7 +164,6 @@ int32_t mndInitInfosTableSchema(const SInfosTableSchema *pSrc, int32_t colNum, S
}
*pDst = schema;
return TSDB_CODE_SUCCESS;
}
@ -165,7 +171,7 @@ int32_t mndInsInitMeta(SHashObj *hash) {
STableMetaRsp meta = {0};
strcpy(meta.dbFName, TSDB_INFORMATION_SCHEMA_DB);
meta.tableType = TSDB_NORMAL_TABLE;
meta.tableType = TSDB_SYSTEM_TABLE;
meta.sversion = 1;
meta.tversion = 1;

View File

@ -692,11 +692,11 @@ static int32_t mndRetrieveMnodes(SNodeMsg *pReq, SShowObj *pShow, char *data, in
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = pObj->createdTime;
*(int64_t *)pWrite = pObj->roleTime;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = pObj->roleTime;
*(int64_t *)pWrite = pObj->createdTime;
cols++;
numOfRows++;

View File

@ -289,6 +289,20 @@ static int32_t mndProcessRetrieveSysTableReq(SNodeMsg *pReq) {
mError("failed to process show-meta req since %s", terrstr());
return -1;
}
STableMetaRsp *meta = (STableMetaRsp *)taosHashGet(pMnode->infosMeta, retrieveReq.tb, strlen(retrieveReq.tb));
pShow->numOfRows = 100;
int32_t offset = 0;
for(int32_t i = 0; i < meta->numOfColumns; ++i) {
pShow->numOfColumns = meta->numOfColumns;
pShow->offset[i] = offset;
int32_t bytes = meta->pSchemas[i].bytes;
pShow->rowSize += bytes;
pShow->bytes[i] = bytes;
offset += bytes;
}
} else {
pShow = mndAcquireShowObj(pMnode, retrieveReq.showId);
if (pShow == NULL) {
@ -330,7 +344,7 @@ static int32_t mndProcessRetrieveSysTableReq(SNodeMsg *pReq) {
size = pShow->rowSize * rowsToRead;
size += SHOW_STEP_SIZE;
SRetrieveTableRsp *pRsp = rpcMallocCont(size);
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
if (pRsp == NULL) {
mndReleaseShowObj((SShowObj*) pShow, false);
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -338,6 +352,8 @@ static int32_t mndProcessRetrieveSysTableReq(SNodeMsg *pReq) {
return -1;
}
pRsp->handle = htobe64(pShow->id);
// if free flag is set, client wants to clean the resources
if ((retrieveReq.free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) {
rowsRead = (*retrieveFp)(pReq, (SShowObj*) pShow, pRsp->data, rowsToRead);

View File

@ -1490,7 +1490,7 @@ static void mndExtractTableName(char *tableId, char *name) {
int32_t pos = -1;
int32_t num = 0;
for (pos = 0; tableId[pos] != 0; ++pos) {
if (tableId[pos] == '.') num++;
if (tableId[pos] == TS_PATH_DELIMITER[0]) num++;
if (num == 2) break;
}
@ -1508,8 +1508,11 @@ static int32_t mndRetrieveStb(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
char *pWrite;
char prefix[TSDB_DB_FNAME_LEN] = {0};
SDbObj *pDb = mndAcquireDb(pMnode, pShow->db);
SDbObj* pDb = NULL;
if (strlen(pShow->db) > 0) {
pDb = mndAcquireDb(pMnode, pShow->db);
if (pDb == NULL) return 0;
}
tstrncpy(prefix, pShow->db, TSDB_DB_FNAME_LEN);
strcat(prefix, TS_PATH_DELIMITER);
@ -1519,7 +1522,7 @@ static int32_t mndRetrieveStb(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
pShow->pIter = sdbFetch(pSdb, SDB_STB, pShow->pIter, (void **)&pStb);
if (pShow->pIter == NULL) break;
if (pStb->dbUid != pDb->uid) {
if (pDb != NULL && pStb->dbUid != pDb->uid) {
if (strncmp(pStb->db, pDb->name, prefixLen) == 0) {
mError("Inconsistent table data, name:%s, db:%s, dbUid:%" PRIu64, pStb->name, pDb->name, pDb->uid);
}
@ -1530,8 +1533,17 @@ static int32_t mndRetrieveStb(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
cols = 0;
SName name = {0};
char db[TSDB_DB_NAME_LEN] = {0};
tNameFromString(&name, pStb->db, T_NAME_ACCT|T_NAME_DB);
tNameGetDbName(&name, db);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, db);
cols++;
char stbName[TSDB_TABLE_NAME_LEN] = {0};
tstrncpy(stbName, pStb->name + prefixLen, TSDB_TABLE_NAME_LEN);
mndExtractTableName(pStb->name, stbName);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, stbName);
cols++;
@ -1548,11 +1560,26 @@ static int32_t mndRetrieveStb(SNodeMsg *pReq, SShowObj *pShow, char *data, int32
*(int32_t *)pWrite = pStb->numOfTags;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = 0; // number of tables
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *)pWrite = pStb->updateTime; // number of tables
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, pStb->comment);
cols++;
numOfRows++;
sdbRelease(pSdb, pStb);
}
if (pDb != NULL) {
mndReleaseDb(pMnode, pDb);
}
pShow->numOfReads += numOfRows;
mndVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
return numOfRows;

View File

@ -559,25 +559,53 @@ static int32_t mndRetrieveVgroups(SNodeMsg *pReq, SShowObj *pShow, char *data, i
int32_t cols = 0;
char *pWrite;
SDbObj *pDb = mndAcquireDb(pMnode, pShow->db);
if (pDb == NULL) return 0;
SDbObj *pDb = NULL;
if (strlen(pShow->db) > 0) {
pDb = mndAcquireDb(pMnode, pShow->db);
if (pDb == NULL) {
return 0;
}
}
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_VGROUP, pShow->pIter, (void **)&pVgroup);
if (pShow->pIter == NULL) break;
if (pVgroup->dbUid == pDb->uid) {
if (pDb != NULL && pVgroup->dbUid != pDb->uid) {
continue;
}
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pVgroup->vgId;
cols++;
SName name = {0};
char db[TSDB_DB_NAME_LEN] = {0};
tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT|T_NAME_DB);
tNameGetDbName(&name, db);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, db);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pVgroup->numOfTables;
cols++;
for (int32_t i = 0; i < pShow->replica; ++i) {
//status
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, "ready"); // TODO
cols++;
//onlines
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pVgroup->replica;
cols++;
for (int32_t i = 0; i < pVgroup->replica; ++i) {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *)pWrite = pVgroup->vnodeGid[i].dnodeId;
cols++;
@ -587,13 +615,15 @@ static int32_t mndRetrieveVgroups(SNodeMsg *pReq, SShowObj *pShow, char *data, i
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, role, pShow->bytes[cols]);
cols++;
}
numOfRows++;
}
numOfRows++;
sdbRelease(pSdb, pVgroup);
}
if (pDb != NULL) {
mndReleaseDb(pMnode, pDb);
}
mndVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
pShow->numOfReads += numOfRows;
return numOfRows;
@ -671,7 +701,7 @@ static int32_t mndRetrieveVnodes(SNodeMsg *pReq, SShowObj *pShow, char *data, in
SVgObj *pVgroup = NULL;
char *pWrite;
int32_t cols = 0;
int32_t dnodeId = pShow->replica;
// int32_t dnodeId = pShow->replica;
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_VGROUP, pShow->pIter, (void **)&pVgroup);
@ -679,17 +709,33 @@ static int32_t mndRetrieveVnodes(SNodeMsg *pReq, SShowObj *pShow, char *data, in
for (int32_t i = 0; i < pVgroup->replica && numOfRows < rows; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->dnodeId != dnodeId) continue;
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(uint32_t *)pWrite = pVgroup->vgId;
cols++;
SName name = {0};
char db[TSDB_DB_NAME_LEN] = {0};
tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT|T_NAME_DB);
tNameGetDbName(&name, db);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, db);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(uint32_t *)pWrite = 0; //todo: Tables
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, mndGetRoleStr(pVgid->role));
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(uint32_t *)pWrite = pVgroup->replica; //onlines
cols++;
numOfRows++;
}

View File

@ -5,7 +5,7 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME mnode_test_db
COMMAND mnode_test_db
)
#add_test(
# NAME mnode_test_db
# COMMAND mnode_test_db
#)

View File

@ -5,7 +5,7 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME mnode_test_dnode
COMMAND mnode_test_dnode
)
#add_test(
# NAME mnode_test_dnode
# COMMAND mnode_test_dnode
#)

View File

@ -5,7 +5,7 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME mnode_test_stb
COMMAND mnode_test_stb
)
#add_test(
# NAME mnode_test_stb
# COMMAND mnode_test_stb
#)

View File

@ -12,4 +12,5 @@ target_link_libraries(
PRIVATE os
PRIVATE common
PRIVATE util
PRIVATE qcom
)

View File

@ -61,6 +61,7 @@ STSchema * metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver);
STSma * metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid);
STSmaWrapper * metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid);
SArray * metaGetSmaTbUids(SMeta *pMeta, bool isDup);
int metaGetTbNum(SMeta *pMeta);
SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
void metaCloseTbCursor(SMTbCursor *pTbCur);

View File

@ -183,6 +183,9 @@ void vnodeOptionsInit(SVnodeCfg *pOptions);
*/
void vnodeOptionsClear(SVnodeCfg *pOptions);
int vnodeValidateTableHash(SVnodeCfg *pVnodeOptions, char *tableFName);
/* ------------------------ FOR COMPILE ------------------------ */
int32_t vnodeAlter(SVnode *pVnode, const SVnodeCfg *pCfg);

View File

@ -705,6 +705,18 @@ SMTbCursor *metaOpenTbCursor(SMeta *pMeta) {
return pTbCur;
}
int metaGetTbNum(SMeta *pMeta) {
SMetaDB *pDB = pMeta->pDB;
DB_BTREE_STAT *sp1;
pDB->pTbDB->stat(pDB->pNtbIdx, NULL, &sp1, 0);
DB_BTREE_STAT *sp2;
pDB->pTbDB->stat(pDB->pCtbIdx, NULL, &sp2, 0);
return sp1->bt_nkeys + sp2->bt_nkeys;
}
void metaCloseTbCursor(SMTbCursor *pTbCur) {
if (pTbCur) {
if (pTbCur->pCur) {

View File

@ -401,14 +401,20 @@ static STsdbReadHandle* tsdbQueryTablesImpl(STsdb* tsdb, STsdbQueryCond* pCond,
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
SColumnInfoData colInfo = {{0}, 0};
colInfo.info = pCond->colList[i];
colInfo.pData = calloc(1, EXTRA_BYTES + pReadHandle->outputCapacity * pCond->colList[i].bytes);
if (colInfo.pData == NULL) {
if (!IS_VAR_DATA_TYPE(colInfo.info.type)) {
colInfo.nullbitmap = calloc(1, BitmapLen(pReadHandle->outputCapacity));
}
if (colInfo.pData == NULL || (colInfo.nullbitmap == NULL && (!IS_VAR_DATA_TYPE(colInfo.info.type)))) {
goto _end;
}
taosArrayPush(pReadHandle->pColumns, &colInfo);
pReadHandle->statis[i].colId = colInfo.info.colId;
}

View File

@ -33,3 +33,25 @@ int vnodeValidateOptions(const SVnodeCfg *pVnodeOptions) {
void vnodeOptionsCopy(SVnodeCfg *pDest, const SVnodeCfg *pSrc) {
memcpy((void *)pDest, (void *)pSrc, sizeof(SVnodeCfg));
}
int vnodeValidateTableHash(SVnodeCfg *pVnodeOptions, char *tableFName) {
uint32_t hashValue = 0;
switch (pVnodeOptions->hashMethod) {
default:
hashValue = MurmurHash3_32(tableFName, strlen(tableFName));
break;
}
// TODO OPEN THIS !!!!!!!
#if 0
if (hashValue < pVnodeOptions->hashBegin || hashValue > pVnodeOptions->hashEnd) {
terrno = TSDB_CODE_VND_HASH_MISMATCH;
return TSDB_CODE_VND_HASH_MISMATCH;
}
#endif
return TSDB_CODE_SUCCESS;
}

View File

@ -27,7 +27,7 @@ int32_t vnodeSync(SVnode *pVnode) { return 0; }
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = pVnode->vgId;
pLoad->role = TAOS_SYNC_STATE_LEADER;
pLoad->numOfTables = 500;
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = 400;
pLoad->totalStorage = 300;
pLoad->compStorage = 200;

View File

@ -27,7 +27,7 @@ void vnodeQueryClose(SVnode *pVnode) { qWorkerDestroy((void **)&pVnode->pQuery);
int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in query queue is processing");
SReadHandle handle = {.reader = pVnode->pTsdb, .meta = pVnode->pMeta};
SReadHandle handle = {.reader = pVnode->pTsdb, .meta = pVnode->pMeta, .config = &pVnode->config};
switch (pMsg->msgType) {
case TDMT_VND_QUERY: {
@ -87,11 +87,24 @@ static int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
SSchema *pTagSchema;
SRpcMsg rpcMsg;
int msgLen = 0;
int32_t code = TSDB_CODE_VND_APP_ERROR;
int32_t code = 0;
char tableFName[TSDB_TABLE_FNAME_LEN];
int32_t rspLen = 0;
void *pRsp = NULL;
STableInfoReq infoReq = {0};
if (tDeserializeSTableInfoReq(pMsg->pCont, pMsg->contLen, &infoReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
metaRsp.dbId = pVnode->config.dbId;
memcpy(metaRsp.dbFName, infoReq.dbFName, sizeof(metaRsp.dbFName));
strcpy(metaRsp.tbName, infoReq.tbName);
sprintf(tableFName, "%s.%s", infoReq.dbFName, infoReq.tbName);
code = vnodeValidateTableHash(&pVnode->config, tableFName);
if (code) {
goto _exit;
}
@ -131,9 +144,6 @@ static int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
goto _exit;
}
metaRsp.dbId = pVnode->config.dbId;
memcpy(metaRsp.dbFName, infoReq.dbFName, sizeof(metaRsp.dbFName));
strcpy(metaRsp.tbName, infoReq.tbName);
if (pTbCfg->type == META_CHILD_TABLE) {
strcpy(metaRsp.stbName, pStbCfg->name);
metaRsp.suid = pTbCfg->ctbCfg.suid;
@ -152,22 +162,22 @@ static int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
memcpy(POINTER_SHIFT(metaRsp.pSchemas, sizeof(SSchema) * pSW->nCols), pTagSchema, sizeof(SSchema) * nTagCols);
}
int32_t rspLen = tSerializeSTableMetaRsp(NULL, 0, &metaRsp);
_exit:
rspLen = tSerializeSTableMetaRsp(NULL, 0, &metaRsp);
if (rspLen < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
void *pRsp = rpcMallocCont(rspLen);
pRsp = rpcMallocCont(rspLen);
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
tSerializeSTableMetaRsp(pRsp, rspLen, &metaRsp);
code = 0;
_exit:
tFreeSTableMetaRsp(&metaRsp);
if (pSW != NULL) {

View File

@ -80,9 +80,24 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
}
case TDMT_VND_CREATE_TABLE: {
SVCreateTbBatchReq vCreateTbBatchReq = {0};
SVCreateTbBatchRsp vCreateTbBatchRsp = {0};
tDeserializeSVCreateTbBatchReq(POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), &vCreateTbBatchReq);
for (int i = 0; i < taosArrayGetSize(vCreateTbBatchReq.pArray); i++) {
int reqNum = taosArrayGetSize(vCreateTbBatchReq.pArray);
for (int i = 0; i < reqNum; i++) {
SVCreateTbReq *pCreateTbReq = taosArrayGet(vCreateTbBatchReq.pArray, i);
char tableFName[TSDB_TABLE_FNAME_LEN];
SMsgHead *pHead = (SMsgHead *)pMsg->pCont;
sprintf(tableFName, "%s.%s", pCreateTbReq->dbFName, pCreateTbReq->name);
int32_t code = vnodeValidateTableHash(&pVnode->config, tableFName);
if (code) {
SVCreateTbRsp rsp;
rsp.code = code;
taosArrayPush(vCreateTbBatchRsp.rspList, &rsp);
}
if (metaCreateTable(pVnode->pMeta, pCreateTbReq) < 0) {
// TODO: handle error
vError("vgId:%d, failed to create table: %s", pVnode->vgId, pCreateTbReq->name);
@ -100,6 +115,19 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
vTrace("vgId:%d process create %" PRIzu " tables", pVnode->vgId, taosArrayGetSize(vCreateTbBatchReq.pArray));
taosArrayDestroy(vCreateTbBatchReq.pArray);
if (vCreateTbBatchRsp.rspList) {
int32_t contLen = tSerializeSVCreateTbBatchRsp(NULL, 0, &vCreateTbBatchRsp);
void *msg = rpcMallocCont(contLen);
tSerializeSVCreateTbBatchRsp(msg, contLen, &vCreateTbBatchRsp);
taosArrayDestroy(vCreateTbBatchRsp.rspList);
*pRsp = calloc(1, sizeof(SRpcMsg));
(*pRsp)->msgType = TDMT_VND_CREATE_TABLE_RSP;
(*pRsp)->pCont = msg;
(*pRsp)->contLen = contLen;
(*pRsp)->handle = pMsg->handle;
(*pRsp)->ahandle = pMsg->ahandle;
}
break;
}
case TDMT_VND_ALTER_STB: {

View File

@ -304,7 +304,9 @@ TEST(testCase, tSma_Data_Insert_Query_Test) {
break;
}
SDiskCfg pDisks = {.level = 0, .primary = 1};
SDiskCfg pDisks = {0};
pDisks.level = 0;
pDisks.primary = 1;
strncpy(pDisks.dir, "/var/lib/taos", TSDB_FILENAME_LEN);
int32_t numOfDisks = 1;
pTsdb->pTfs = tfsOpen(&pDisks, numOfDisks);

View File

@ -30,6 +30,7 @@ extern "C" {
#define CTG_DEFAULT_CACHE_TBLMETA_NUMBER 1000
#define CTG_DEFAULT_RENT_SECOND 10
#define CTG_DEFAULT_RENT_SLOT_SIZE 10
#define CTG_DEFAULT_MAX_RETRY_TIMES 3
#define CTG_RENT_SLOT_SECOND 1.5
@ -57,10 +58,10 @@ enum {
};
typedef struct SCtgDebug {
bool lockDebug;
bool cacheDebug;
bool apiDebug;
bool metaDebug;
bool lockEnable;
bool cacheEnable;
bool apiEnable;
bool metaEnable;
uint32_t showCachePeriodSec;
} SCtgDebug;
@ -161,6 +162,8 @@ typedef struct SCtgRemoveTblMsg {
typedef struct SCtgMetaAction {
int32_t act;
void *data;
bool syncReq;
uint64_t seqId;
} SCtgMetaAction;
typedef struct SCtgQNode {
@ -168,14 +171,21 @@ typedef struct SCtgQNode {
struct SCtgQNode *next;
} SCtgQNode;
typedef struct SCtgQueue {
SRWLatch qlock;
uint64_t seqId;
uint64_t seqDone;
SCtgQNode *head;
SCtgQNode *tail;
tsem_t reqSem;
tsem_t rspSem;
uint64_t qRemainNum;
} SCtgQueue;
typedef struct SCatalogMgmt {
bool exit;
SRWLatch lock;
SRWLatch qlock;
SCtgQNode *head;
SCtgQNode *tail;
tsem_t sem;
uint64_t qRemainNum;
SCtgQueue queue;
TdThread updateThread;
SHashObj *pCluster; //key: clusterId, value: SCatalog*
SCatalogStat stat;
@ -191,8 +201,8 @@ typedef struct SCtgAction {
ctgActFunc func;
} SCtgAction;
#define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.qRemainNum, 1)
#define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.qRemainNum, 1)
#define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
#define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
#define CTG_STAT_ADD(n) atomic_add_fetch_64(&(n), 1)
#define CTG_STAT_SUB(n) atomic_sub_fetch_64(&(n), 1)
@ -232,9 +242,9 @@ typedef struct SCtgAction {
#define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__)
#define CTG_LOCK_DEBUG(...) do { if (gCTGDebug.lockDebug) { qDebug(__VA_ARGS__); } } while (0)
#define CTG_CACHE_DEBUG(...) do { if (gCTGDebug.cacheDebug) { qDebug(__VA_ARGS__); } } while (0)
#define CTG_API_DEBUG(...) do { if (gCTGDebug.apiDebug) { qDebug(__VA_ARGS__); } } while (0)
#define CTG_LOCK_DEBUG(...) do { if (gCTGDebug.lockEnable) { qDebug(__VA_ARGS__); } } while (0)
#define CTG_CACHE_DEBUG(...) do { if (gCTGDebug.cacheEnable) { qDebug(__VA_ARGS__); } } while (0)
#define CTG_API_DEBUG(...) do { if (gCTGDebug.apiEnable) { qDebug(__VA_ARGS__); } } while (0)
#define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000

File diff suppressed because it is too large Load Diff

View File

@ -713,7 +713,7 @@ void *ctgTestGetDbVgroupThread(void *param) {
int32_t n = 0;
while (!ctgTestStop) {
code = catalogGetDBVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, ctgTestDbname, false, &vgList);
code = catalogGetDBVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, ctgTestDbname, &vgList);
if (code) {
assert(0);
}
@ -2009,7 +2009,7 @@ TEST(dbVgroup, getSetDbVgroupCase) {
strcpy(n.dbname, "db1");
strcpy(n.tname, ctgTestTablename);
code = catalogGetDBVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, ctgTestDbname, false, &vgList);
code = catalogGetDBVgInfo(pCtg, mockPointer, (const SEpSet *)mockPointer, ctgTestDbname, &vgList);
ASSERT_EQ(code, 0);
ASSERT_EQ(taosArrayGetSize((const SArray *)vgList), ctgTestVgNum);

View File

@ -253,11 +253,6 @@ typedef struct STaskIdInfo {
char* str;
} STaskIdInfo;
typedef struct STaskBufInfo {
int32_t bufSize; // total available buffer size in bytes
int32_t remainBuf; // remain buffer size
} STaskBufInfo;
typedef struct SExecTaskInfo {
STaskIdInfo id;
char* content;
@ -269,8 +264,7 @@ typedef struct SExecTaskInfo {
uint64_t totalRows; // total number of rows
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray<STableQueryInfo*> structure
char* sql; // query sql string
jmp_buf env; // when error occurs, abort
STaskBufInfo bufInfo; // available buffer info this task
jmp_buf env; //
struct SOperatorInfo* pRoot;
} SExecTaskInfo;
@ -315,7 +309,6 @@ typedef struct STaskRuntimeEnv {
enum {
OP_NOT_OPENED = 0x0,
OP_OPENED = 0x1,
OP_IN_EXECUTING = 0x3,
OP_RES_TO_RETURN = 0x5,
OP_EXEC_DONE = 0x9,
};
@ -366,9 +359,9 @@ typedef struct SQInfo {
} SQInfo;
enum {
EX_SOURCE_DATA_NOT_READY = 0x1,
EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_EXHAUSTED = 0x3,
DATA_NOT_READY = 0x1,
DATA_READY = 0x2,
DATA_EXHAUSTED = 0x3,
};
typedef struct SSourceDataInfo {
@ -379,6 +372,18 @@ typedef struct SSourceDataInfo {
int32_t status;
} SSourceDataInfo;
typedef struct SLoadRemoteDataInfo {
uint64_t totalSize; // total load bytes from remote
uint64_t totalRows; // total number of rows
uint64_t totalElapsed; // total elapsed time
} SLoadRemoteDataInfo;
enum {
EX_SOURCE_DATA_NOT_READY = 0x1,
EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_EXHAUSTED = 0x3,
};
typedef struct SExchangeInfo {
SArray* pSources;
SArray* pSourceDataInfo;
@ -387,9 +392,7 @@ typedef struct SExchangeInfo {
SSDataBlock* pResult;
bool seqLoadData; // sequential load data or not, false by default
int32_t current;
uint64_t totalSize; // total load bytes from remote
uint64_t totalRows; // total number of rows
uint64_t totalElapsed; // total elapsed time
SLoadRemoteDataInfo loadInfo;
} SExchangeInfo;
typedef struct STableScanInfo {
@ -436,19 +439,21 @@ typedef struct SSysTableScanInfo {
void* readHandle;
};
void *pCur; // cursor
SRetrieveTableReq* pReq;
SRetrieveMetaTableRsp *pRsp;
SRetrieveTableReq req;
SEpSet epSet;
int32_t type; // show type
tsem_t ready;
SSchema* pSchema;
SSDataBlock* pRes;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
void *pCur; // cursor for iterate the local table meta store.
SArray *scanCols; // SArray<int16_t> scan column id list
int32_t type; // show type, TODO remove it
SName name;
SSDataBlock* pRes;
int32_t capacity;
int64_t numOfBlocks; // extract basic running information.
int64_t totalRows;
int64_t elapsedTime;
int64_t totalBytes;
SLoadRemoteDataInfo loadInfo;
} SSysTableScanInfo;
typedef struct SOptrBasicInfo {
@ -496,7 +501,6 @@ typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
SSDataBlock *existDataBlock;
int32_t threshold;
bool hasVarCol;
} SProjectOperatorInfo;
typedef struct SLimitOperatorInfo {
@ -539,8 +543,10 @@ typedef struct SFillOperatorInfo {
typedef struct SGroupbyOperatorInfo {
SOptrBasicInfo binfo;
int32_t colIndex;
SArray* pGroupCols;
char* prevData; // previous group by value
SGroupResInfo groupResInfo;
SAggSupporter aggSup;
} SGroupbyOperatorInfo;
typedef struct SSessionAggOperatorInfo {
@ -631,13 +637,15 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SOperatorInfo* downstream, SExprI
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createOrderOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SArray* pOrderVal, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pOrderVal, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSysTableScanOperatorInfo(void* pSysTableReadHandle, const SArray* pExprInfo, const SSchema* pSchema,
int32_t tableType, SEpSet epset, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSysTableScanOperatorInfo(void* pSysTableReadHandle, SSDataBlock* pResBlock, const SName* pName,
SNode* pCondition, SEpSet epset, SArray* colList, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createLimitOperatorInfo(SOperatorInfo* downstream, int32_t numOfDownstream, SLimit* pLimit, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
const STableGroupInfo* pTableGroupInfo, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock,
SArray* pGroupColList, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo);
SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createAllTimeIntervalOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, SOperatorInfo* downstream,
@ -645,8 +653,6 @@ SOperatorInfo* createAllTimeIntervalOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, S
SOperatorInfo* createFillOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, SOperatorInfo* downstream, SExprInfo* pExpr,
int32_t numOfOutput, bool multigroupResult);
SOperatorInfo* createGroupbyOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, SOperatorInfo* downstream, SExprInfo* pExpr,
int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, SOperatorInfo* downstream,
SExprInfo* pExpr, int32_t numOfOutput);
@ -669,7 +675,6 @@ SOperatorInfo* createSLimitOperatorInfo(STaskRuntimeEnv* pRuntimeEnv, SOperatorI
SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pdownstream, int32_t numOfDownstream, SSchema* pSchema,
int32_t numOfOutput);
// int32_t doCreateFilterInfo(SColumnInfo* pCols, int32_t numOfCols, int32_t numOfFilterCols, SSingleColumnFilterInfo** pFilterInfo, uint64_t qId);
void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock);
bool doFilterDataBlock(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, int32_t numOfRows, int8_t* p);
void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);
@ -679,6 +684,7 @@ SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numO
void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order);
void finalizeQueryResult(SqlFunctionCtx* pCtx, int32_t numOfOutput);
void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity);
@ -692,16 +698,9 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
STableQueryInfo* createTableQueryInfo(void* buf, bool groupbyColumn, STimeWindow win);
STableQueryInfo* createTmpTableQueryInfo(STimeWindow win);
int32_t buildArithmeticExprFromMsg(SExprInfo* pArithExprInfo, void* pQueryMsg);
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);
bool checkNeedToCompressQueryCol(SQInfo* pQInfo);
void setQueryStatus(STaskRuntimeEnv* pRuntimeEnv, int8_t status);
int32_t doDumpQueryResult(SQInfo* pQInfo, char* data, int8_t compressed, int32_t* compLen);
size_t getResultSize(SQInfo* pQInfo, int64_t* numOfRows);
void setTaskKilled(SExecTaskInfo* pTaskInfo);
void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType);
@ -711,8 +710,6 @@ void calculateOperatorProfResults(SQInfo* pQInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
void freeQueryAttr(STaskAttr* pQuery);
int32_t getMaximumIdleDurationSec();
void doInvokeUdf(struct SUdfInfo* pUdfInfo, SqlFunctionCtx* pCtx, int32_t idx, int32_t type);

View File

@ -15,11 +15,12 @@
#include "dataSinkInt.h"
#include "dataSinkMgt.h"
#include "executorimpl.h"
#include "planner.h"
#include "tcompression.h"
#include "tglobal.h"
#include "tqueue.h"
#include "executorimpl.h"
#include "tdatablock.h"
typedef struct SDataDispatchBuf {
int32_t useSize;
@ -84,8 +85,11 @@ static void copyData(const SInputData* pInput, const SDataBlockDescNode* pSchema
*compLen += compSizes[col];
compSizes[col] = htonl(compSizes[col]);
} else {
memmove(data, pColRes->pData, pColRes->info.bytes * pInput->pData->info.rows);
data += pColRes->info.bytes * pInput->pData->info.rows;
for(int32_t i = 0; i < pInput->pData->info.rows; ++i) {
char* pData = colDataGetData(pColRes, i);
memmove(data, pData, pColRes->info.bytes);
data += pColRes->info.bytes;
}
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1228,4 +1228,4 @@ TEST(testCase, time_interval_Operator_Test) {
}
#endif
#pragma GCC diagnostic pop
#pragma GCC diagnosti

View File

@ -37,6 +37,12 @@ bool getMinmaxFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
void minFunction(SqlFunctionCtx* pCtx);
void maxFunction(SqlFunctionCtx *pCtx);
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
void firstFunction(SqlFunctionCtx *pCtx);
void lastFunction(SqlFunctionCtx *pCtx);
void valFunction(SqlFunctionCtx *pCtx);
#ifdef __cplusplus
}
#endif

View File

@ -61,6 +61,36 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = maxFunction,
.finalizeFunc = functionFinalizer
},
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,
.classification = FUNC_MGT_AGG_FUNC,
.checkFunc = stubCheckAndGetResultType,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = firstFunction,
.finalizeFunc = functionFinalizer
},
{
.name = "last",
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC,
.checkFunc = stubCheckAndGetResultType,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
.finalizeFunc = functionFinalizer
},
// {
// .name = "valueAssigner",
// .type = FUNCTION_TYPE_ASSIGNER,
// .classification = FUNC_MGT_AGG_FUNC,
// .checkFunc = stubCheckAndGetResultType,
// .getEnvFunc = getFirstLastFuncEnv,
// .initFunc = functionSetup,
// .processFunc = valFunction,
// .finalizeFunc = functionFinalizer
// },
{
.name = "concat",
.type = FUNCTION_TYPE_CONCAT,
@ -98,6 +128,8 @@ int32_t stubCheckAndGetResultType(SFunctionNode* pFunc) {
pFunc->node.resType = (SDataType) { .bytes = tDataTypes[resType].bytes, .type = resType };
break;
}
case FUNCTION_TYPE_FIRST:
case FUNCTION_TYPE_LAST:
case FUNCTION_TYPE_MIN:
case FUNCTION_TYPE_MAX: {
SColumnNode* pParam = nodesListGetNode(pFunc->pParameterList, 0);

View File

@ -68,13 +68,12 @@ void countFunction(SqlFunctionCtx *pCtx) {
int32_t numOfElem = 0;
/*
* 1. column data missing (schema modified) causes pCtx->hasNull == true. pCtx->isAggSet == true;
* 2. for general non-primary key columns, pCtx->hasNull may be true or false, pCtx->isAggSet == true;
* 3. for primary key column, pCtx->hasNull always be false, pCtx->isAggSet == false;
* 1. column data missing (schema modified) causes pInputCol->hasNull == true. pInput->colDataAggIsSet == true;
* 2. for general non-primary key columns, pInputCol->hasNull may be true or false, pInput->colDataAggIsSet == true;
* 3. for primary key column, pInputCol->hasNull always be false, pInput->colDataAggIsSet == false;
*/
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
if (pInput->colDataAggIsSet && pInput->totalRows == pInput->numOfRows) {
numOfElem = pInput->numOfRows - pInput->pColumnDataAgg[0]->numOfNull;
ASSERT(numOfElem >= 0);
@ -169,7 +168,7 @@ void sumFunction(SqlFunctionCtx *pCtx) {
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
}
bool getSumFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SSumRes);
return true;
}
@ -261,8 +260,7 @@ bool minFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) {
return true;
}
bool getMinmaxFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SNode* pNode = nodesListGetNode(pFunc->pParameterList, 0);
bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(int64_t);
return true;
}
@ -442,3 +440,125 @@ void maxFunction(SqlFunctionCtx *pCtx) {
int32_t numOfElems = doMinMaxHelper(pCtx, 0);
SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);
}
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = nodesListGetNode(pFunc->pParameterList, 0);
pEnv->calcMemSize = pNode->node.resType.bytes;
return true;
}
// TODO fix this
// This ordinary first function only handle the data block in ascending order
void firstFunction(SqlFunctionCtx *pCtx) {
if (pCtx->order == TSDB_ORDER_DESC) {
return;
}
int32_t numOfElems = 0;
SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
// All null data column, return directly.
if (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows) {
ASSERT(pInputCol->hasNull == true);
return;
}
// Check for the first not null data
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, NULL)) {
continue;
}
char* data = colDataGetData(pInputCol, i);
memcpy(buf, data, pInputCol->info.bytes);
// TODO handle the subsidary value
// if (pCtx->ptsList != NULL) {
// TSKEY k = GET_TS_DATA(pCtx, i);
// DO_UPDATE_TAG_COLUMNS(pCtx, k);
// }
pResInfo->hasResult = DATA_SET_FLAG;
pResInfo->complete = true;
numOfElems++;
break;
}
SET_VAL(pResInfo, numOfElems, 1);
}
void lastFunction(SqlFunctionCtx *pCtx) {
if (pCtx->order != TSDB_ORDER_DESC) {
return;
}
int32_t numOfElems = 0;
SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
// All null data column, return directly.
if (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows) {
ASSERT(pInputCol->hasNull == true);
return;
}
if (pCtx->order == TSDB_ORDER_DESC) {
for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, NULL)) {
continue;
}
char* data = colDataGetData(pInputCol, i);
memcpy(buf, data, pInputCol->info.bytes);
// TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->hasResult = DATA_SET_FLAG;
pResInfo->complete = true; // set query completed on this column
numOfElems++;
break;
}
} else { // ascending order
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, NULL)) {
continue;
}
char* data = colDataGetData(pInputCol, i);
TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0;
if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) {
pResInfo->hasResult = DATA_SET_FLAG;
memcpy(buf, data, pCtx->inputBytes);
*(TSKEY*)buf = ts;
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
}
numOfElems++;
break;
}
}
SET_VAL(pResInfo, numOfElems, 1);
}
void valFunction(SqlFunctionCtx *pCtx) {
SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
char* buf = GET_ROWCELL_INTERBUF(pResInfo);
SColumnInfoData* pInputCol = pCtx->input.pData[0];
memcpy(buf, pInputCol->pData, pInputCol->info.bytes);
}

View File

@ -31,6 +31,9 @@
#define COPY_CHAR_POINT_FIELD(fldname) \
do { \
if (NULL == (pSrc)->fldname) { \
break; \
} \
(pDst)->fldname = strdup((pSrc)->fldname); \
} while (0)
@ -108,6 +111,10 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
exprNodeCopy((const SExprNode*)pSrc, (SExprNode*)pDst);
COPY_CHAR_POINT_FIELD(literal);
COPY_SCALAR_FIELD(isDuration);
COPY_SCALAR_FIELD(translate);
if (!pSrc->translate) {
return (SNode*)pDst;
}
switch (pSrc->node.resType.type) {
case TSDB_DATA_TYPE_NULL:
break;
@ -134,7 +141,12 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
COPY_CHAR_POINT_FIELD(datum.p);
pDst->datum.p = malloc(pSrc->node.resType.bytes + VARSTR_HEADER_SIZE);
if (NULL == pDst->datum.p) {
nodesDestroyNode(pDst);
return NULL;
}
memcpy(pDst->datum.p, pSrc->datum.p, pSrc->node.resType.bytes + VARSTR_HEADER_SIZE);
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:

View File

@ -110,6 +110,8 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiTableSeqScan";
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return "PhysiSreamScan";
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return "PhysiSystemTableScan";
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return "PhysiProject";
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
@ -630,6 +632,87 @@ static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) {
return jsonToPhysiScanNode(pJson, pObj);
}
static const char* jkEndPointFqdn = "Fqdn";
static const char* jkEndPointPort = "Port";
static int32_t epToJson(const void* pObj, SJson* pJson) {
const SEp* pNode = (const SEp*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkEndPointFqdn, pNode->fqdn);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkEndPointPort, pNode->port);
}
return code;
}
static int32_t jsonToEp(const SJson* pJson, void* pObj) {
SEp* pNode = (SEp*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkEndPointFqdn, pNode->fqdn);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetSmallIntValue(pJson, jkEndPointPort, &pNode->port);
}
return code;
}
static const char* jkEpSetInUse = "InUse";
static const char* jkEpSetNumOfEps = "NumOfEps";
static const char* jkEpSetEps = "Eps";
static int32_t epSetToJson(const void* pObj, SJson* pJson) {
const SEpSet* pNode = (const SEpSet*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkEpSetInUse, pNode->inUse);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkEpSetNumOfEps, pNode->numOfEps);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddArray(pJson, jkEpSetEps, epToJson, pNode->eps, sizeof(SEp), pNode->numOfEps);
}
return code;
}
static int32_t jsonToEpSet(const SJson* pJson, void* pObj) {
SEpSet* pNode = (SEpSet*)pObj;
int32_t code = tjsonGetTinyIntValue(pJson, jkEpSetInUse, &pNode->inUse);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkEpSetNumOfEps, &pNode->numOfEps);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToArray(pJson, jkEpSetEps, jsonToEp, pNode->eps, sizeof(SEp));
}
return code;
}
static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) {
const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj;
int32_t code = physiScanNodeToJson(pObj, pJson);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSysTableScanPhysiPlanMnodeEpSet, epSetToJson, &pNode->mgmtEpSet);
}
return code;
}
static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
SSystemTableScanPhysiNode* pNode = (SSystemTableScanPhysiNode*)pObj;
int32_t code = jsonToPhysiScanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkSysTableScanPhysiPlanMnodeEpSet, jsonToEpSet, &pNode->mgmtEpSet);
}
return code;
}
static const char* jkProjectPhysiPlanProjections = "Projections";
static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
@ -888,31 +971,6 @@ static int32_t jsonToSubplanId(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkEndPointFqdn = "Fqdn";
static const char* jkEndPointPort = "Port";
static int32_t epToJson(const void* pObj, SJson* pJson) {
const SEp* pNode = (const SEp*)pObj;
int32_t code = tjsonAddStringToObject(pJson, jkEndPointFqdn, pNode->fqdn);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkEndPointPort, pNode->port);
}
return code;
}
static int32_t jsonToEp(const SJson* pJson, void* pObj) {
SEp* pNode = (SEp*)pObj;
int32_t code = tjsonGetStringValue(pJson, jkEndPointFqdn, pNode->fqdn);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetSmallIntValue(pJson, jkEndPointPort, &pNode->port);
}
return code;
}
static const char* jkQueryNodeAddrId = "Id";
static const char* jkQueryNodeAddrInUse = "InUse";
static const char* jkQueryNodeAddrNumOfEps = "NumOfEps";
@ -1244,7 +1302,7 @@ static int32_t datumToJson(const void* pObj, SJson* pJson) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
code = tjsonAddStringToObject(pJson, jkValueDatum, pNode->datum.p);
code = tjsonAddStringToObject(pJson, jkValueDatum, varDataVal(pNode->datum.p));
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:
@ -1306,9 +1364,16 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) {
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
code = tjsonDupStringValue(pJson, jkValueDatum, &pNode->datum.p);
case TSDB_DATA_TYPE_VARBINARY: {
pNode->datum.p = calloc(1, pNode->node.resType.bytes);
if (NULL == pNode->datum.p) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
varDataSetLen(pNode->datum.p, pNode->node.resType.bytes);
code = tjsonGetStringValue(pJson, jkValueDatum, varDataVal(pNode->datum.p));
break;
}
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
@ -1496,38 +1561,6 @@ static int32_t jsonToTableNode(const SJson* pJson, void* pObj) {
return code;
}
static const char* jkEpSetInUse = "InUse";
static const char* jkEpSetNumOfEps = "NumOfEps";
static const char* jkEpSetEps = "Eps";
static int32_t epSetToJson(const void* pObj, SJson* pJson) {
const SEpSet* pNode = (const SEpSet*)pObj;
int32_t code = tjsonAddIntegerToObject(pJson, jkEpSetInUse, pNode->inUse);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkEpSetNumOfEps, pNode->numOfEps);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddArray(pJson, jkEpSetEps, epToJson, pNode->eps, sizeof(SEp), pNode->numOfEps);
}
return code;
}
static int32_t jsonToEpSet(const SJson* pJson, void* pObj) {
SEpSet* pNode = (SEpSet*)pObj;
int32_t code = tjsonGetTinyIntValue(pJson, jkEpSetInUse, &pNode->inUse);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkEpSetNumOfEps, &pNode->numOfEps);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToArray(pJson, jkEpSetEps, jsonToEp, pNode->eps, sizeof(SEp));
}
return code;
}
static const char* jkVgroupInfoVgId = "VgId";
static const char* jkVgroupInfoHashBegin = "HashBegin";
static const char* jkVgroupInfoHashEnd = "HashEnd";
@ -2019,6 +2052,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return physiTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return physiStreamScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return physiSysTableScanNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return physiProjectNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_JOIN:
@ -2104,6 +2139,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToPhysiTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return jsonToPhysiStreamScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return jsonToPhysiSysTableScanNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return jsonToPhysiProjectNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_JOIN:

View File

@ -94,8 +94,6 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SDropDatabaseStmt));
case QUERY_NODE_ALTER_DATABASE_STMT:
return makeNode(type, sizeof(SAlterDatabaseStmt));
case QUERY_NODE_SHOW_DATABASES_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_CREATE_TABLE_STMT:
return makeNode(type, sizeof(SCreateTableStmt));
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
@ -108,17 +106,12 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SDropTableStmt));
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
return makeNode(type, sizeof(SDropSuperTableStmt));
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_STABLES_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_CREATE_USER_STMT:
return makeNode(type, sizeof(SCreateUserStmt));
case QUERY_NODE_ALTER_USER_STMT:
return makeNode(type, sizeof(SAlterUserStmt));
case QUERY_NODE_DROP_USER_STMT:
return makeNode(type, sizeof(SDropUserStmt));
case QUERY_NODE_SHOW_USERS_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_USE_DATABASE_STMT:
return makeNode(type, sizeof(SUseDatabaseStmt));
case QUERY_NODE_CREATE_DNODE_STMT:
@ -127,12 +120,6 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SDropDnodeStmt));
case QUERY_NODE_ALTER_DNODE_STMT:
return makeNode(type, sizeof(SAlterDnodeStmt));
case QUERY_NODE_SHOW_DNODES_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_SHOW_VGROUPS_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_QNODES_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_CREATE_INDEX_STMT:
return makeNode(type, sizeof(SCreateIndexStmt));
case QUERY_NODE_DROP_INDEX_STMT:
@ -145,6 +132,19 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SCreateTopicStmt));
case QUERY_NODE_DROP_TOPIC_STMT:
return makeNode(type, sizeof(SDropTopicStmt));
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_STABLES_STMT:
case QUERY_NODE_SHOW_USERS_STMT:
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_VGROUPS_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:
case QUERY_NODE_SHOW_QNODES_STMT:
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
case QUERY_NODE_SHOW_INDEXES_STMT:
case QUERY_NODE_SHOW_STREAMS_STMT:
return makeNode(type, sizeof(SShowStmt));
case QUERY_NODE_LOGIC_PLAN_SCAN:
return makeNode(type, sizeof(SScanLogicNode));
case QUERY_NODE_LOGIC_PLAN_JOIN:
@ -171,6 +171,8 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(STableSeqScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
return makeNode(type, sizeof(SStreamScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
return makeNode(type, sizeof(SSystemTableScanPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
return makeNode(type, sizeof(SProjectPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_JOIN:

View File

@ -87,6 +87,7 @@ SNodeList* addNodeToList(SAstCreateContext* pCxt, SNodeList* pList, SNode* pNode
SNode* createColumnNode(SAstCreateContext* pCxt, const SToken* pTableAlias, const SToken* pColumnName);
SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* pLiteral);
SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral);
SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt);
SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, const SToken* pAlias);
SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2);
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight);
@ -142,7 +143,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_
SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, const SToken* pOldColName, const SToken* pNewColName);
SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, const SToken* pTagName, SNode* pVal);
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName);
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDbName);
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbNamePattern);
SNode* createCreateUserStmt(SAstCreateContext* pCxt, const SToken* pUserName, const SToken* pPassword);
SNode* createAlterUserStmt(SAstCreateContext* pCxt, const SToken* pUserName, int8_t alterType, const SToken* pVal);
SNode* createDropUserStmt(SAstCreateContext* pCxt, const SToken* pUserName);

View File

@ -78,6 +78,8 @@ typedef struct STableDataBlocks {
char *pData;
bool cloned;
STagData tagData;
char tableName[TSDB_TABLE_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
SParsedDataColInfo boundColumnInfo;
SRowBuilder rowBuilder;
@ -115,10 +117,10 @@ static FORCE_INLINE void getMemRowAppendInfo(SSchema *pSchema, uint8_t rowType,
}
}
static FORCE_INLINE int32_t setBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
pBlocks->tid = pTableMeta->suid;
pBlocks->uid = pTableMeta->uid;
pBlocks->sversion = pTableMeta->sversion;
static FORCE_INLINE int32_t setBlockInfo(SSubmitBlk *pBlocks, STableDataBlocks* dataBuf, int32_t numOfRows) {
pBlocks->tid = dataBuf->pTableMeta->suid;
pBlocks->uid = dataBuf->pTableMeta->uid;
pBlocks->sversion = dataBuf->pTableMeta->sversion;
if (pBlocks->numOfRows + numOfRows >= INT16_MAX) {
return TSDB_CODE_TSC_INVALID_OPERATION;

View File

@ -23,6 +23,13 @@ extern "C" {
#include "os.h"
#include "query.h"
#define parserFatal(param, ...) qFatal("PARSER: " param, __VA_ARGS__)
#define parserError(param, ...) qError("PARSER: " param, __VA_ARGS__)
#define parserWarn(param, ...) qWarn("PARSER: " param, __VA_ARGS__)
#define parserInfo(param, ...) qInfo("PARSER: " param, __VA_ARGS__)
#define parserDebug(param, ...) qDebug("PARSER: " param, __VA_ARGS__)
#define parserTrace(param, ...) qTrace("PARSER: " param, __VA_ARGS__)
typedef struct SMsgBuf {
int32_t len;
char *buf;

View File

@ -83,14 +83,12 @@ cmd ::= CREATE USER user_name(A) PASS NK_STRING(B).
cmd ::= ALTER USER user_name(A) PASS NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PASSWD, &B); }
cmd ::= ALTER USER user_name(A) PRIVILEGE NK_STRING(B). { pCxt->pRootNode = createAlterUserStmt(pCxt, &A, TSDB_ALTER_USER_PRIVILEGES, &B); }
cmd ::= DROP USER user_name(A). { pCxt->pRootNode = createDropUserStmt(pCxt, &A); }
cmd ::= SHOW USERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL); }
/************************************************ create/drop/alter/show dnode ****************************************/
cmd ::= CREATE DNODE dnode_endpoint(A). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, NULL); }
cmd ::= CREATE DNODE dnode_host_name(A) PORT NK_INTEGER(B). { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &A, &B); }
cmd ::= DROP DNODE NK_INTEGER(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A); }
cmd ::= DROP DNODE dnode_endpoint(A). { pCxt->pRootNode = createDropDnodeStmt(pCxt, &A); }
cmd ::= SHOW DNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL); }
cmd ::= ALTER DNODE NK_INTEGER(A) NK_STRING(B). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &A, &B, NULL); }
cmd ::= ALTER DNODE NK_INTEGER(A) NK_STRING(B) NK_STRING(C). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &A, &B, &C); }
cmd ::= ALTER ALL DNODES NK_STRING(A). { pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &A, NULL); }
@ -112,12 +110,10 @@ cmd ::= ALTER LOCAL NK_STRING(A) NK_STRING(B).
/************************************************ create/drop qnode ***************************************************/
cmd ::= CREATE QNODE ON DNODE NK_INTEGER(A). { pCxt->pRootNode = createCreateQnodeStmt(pCxt, &A); }
cmd ::= DROP QNODE ON DNODE NK_INTEGER(A). { pCxt->pRootNode = createDropQnodeStmt(pCxt, &A); }
cmd ::= SHOW QNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL); }
/************************************************ create/drop/show/use database ***************************************/
cmd ::= CREATE DATABASE not_exists_opt(A) db_name(B) db_options(C). { pCxt->pRootNode = createCreateDatabaseStmt(pCxt, A, &B, C); }
cmd ::= DROP DATABASE exists_opt(A) db_name(B). { pCxt->pRootNode = createDropDatabaseStmt(pCxt, A, &B); }
cmd ::= SHOW DATABASES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL); }
cmd ::= USE db_name(A). { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &A); }
cmd ::= ALTER DATABASE db_name(A) alter_db_options(B). { pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &A, B); }
@ -164,7 +160,7 @@ alter_db_option(A) ::= WAL NK_INTEGER(B).
alter_db_option(A) ::= QUORUM NK_INTEGER(B). { A.type = DB_OPTION_QUORUM; A.val = B; }
alter_db_option(A) ::= CACHELAST NK_INTEGER(B). { A.type = DB_OPTION_CACHELAST; A.val = B; }
/************************************************ create/drop/show table/stable ***************************************/
/************************************************ create/drop table/stable ********************************************/
cmd ::= CREATE TABLE not_exists_opt(A) full_table_name(B)
NK_LP column_def_list(C) NK_RP tags_def_opt(D) table_options(E). { pCxt->pRootNode = createCreateTableStmt(pCxt, A, B, C, D, E); }
cmd ::= CREATE TABLE multi_create_clause(A). { pCxt->pRootNode = createCreateMultiTableStmt(pCxt, A); }
@ -172,8 +168,6 @@ cmd ::= CREATE STABLE not_exists_opt(A) full_table_name(B)
NK_LP column_def_list(C) NK_RP tags_def(D) table_options(E). { pCxt->pRootNode = createCreateTableStmt(pCxt, A, B, C, D, E); }
cmd ::= DROP TABLE multi_drop_clause(A). { pCxt->pRootNode = createDropTableStmt(pCxt, A); }
cmd ::= DROP STABLE exists_opt(A) full_table_name(B). { pCxt->pRootNode = createDropSuperTableStmt(pCxt, A, B); }
cmd ::= SHOW TABLES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, NULL); }
cmd ::= SHOW STABLES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, NULL); }
cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = A; }
@ -286,6 +280,31 @@ col_name_list(A) ::= col_name_list(B) NK_COMMA col_name(C).
col_name(A) ::= column_name(B). { A = createColumnNode(pCxt, NULL, &B); }
/************************************************ show ****************************************************************/
cmd ::= SHOW DNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT, NULL, NULL); }
cmd ::= SHOW USERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT, NULL, NULL); }
cmd ::= SHOW DATABASES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT, NULL, NULL); }
cmd ::= SHOW db_name_cond_opt(A) TABLES like_pattern_opt(B). { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TABLES_STMT, A, B); }
cmd ::= SHOW db_name_cond_opt(A) STABLES like_pattern_opt(B). { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STABLES_STMT, A, B); }
cmd ::= SHOW db_name_cond_opt(A) VGROUPS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, A, NULL); }
cmd ::= SHOW MNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL, NULL); }
cmd ::= SHOW MODULES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT, NULL, NULL); }
cmd ::= SHOW QNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT, NULL, NULL); }
cmd ::= SHOW FUNCTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT, NULL, NULL); }
cmd ::= SHOW INDEXES FROM table_name_cond(A) from_db_opt(B). { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, A, B); }
cmd ::= SHOW STREAMS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT, NULL, NULL); }
db_name_cond_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
db_name_cond_opt(A) ::= db_name(B) NK_DOT. { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
like_pattern_opt(A) ::= . { A = NULL; }
like_pattern_opt(A) ::= LIKE NK_STRING(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
table_name_cond(A) ::= table_name(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
from_db_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
from_db_opt(A) ::= FROM db_name(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
%type func_name_list { SNodeList* }
%destructor func_name_list { nodesDestroyList($$); }
func_name_list(A) ::= func_name(B). { A = createNodeList(pCxt, B); }
@ -317,13 +336,6 @@ cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C).
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C); }
cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); }
/************************************************ show vgroups ********************************************************/
cmd ::= SHOW VGROUPS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, NULL); }
cmd ::= SHOW db_name(B) NK_DOT VGROUPS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, &B); }
/************************************************ show mnodes *********************************************************/
cmd ::= SHOW MNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT, NULL); }
/************************************************ select **************************************************************/
cmd ::= query_expression(A). { pCxt->pRootNode = A; }

View File

@ -21,6 +21,7 @@
do { \
if (NULL == (p)) { \
pCxt->valid = false; \
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "Out of memory"); \
return NULL; \
} \
} while (0)
@ -332,41 +333,40 @@ void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
static bool checkUserName(SAstCreateContext* pCxt, const SToken* pUserName) {
if (NULL == pUserName) {
return false;
}
pCxt->valid = false;
} else {
if (pUserName->n >= TSDB_USER_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG);
pCxt->valid = false;
}
}
return pCxt->valid;
}
static bool checkPassword(SAstCreateContext* pCxt, const SToken* pPasswordToken, char* pPassword) {
if (NULL == pPasswordToken) {
return false;
}
if (pPasswordToken->n >= (TSDB_USET_PASSWORD_LEN - 2)) {
pCxt->valid = false;
} else if (pPasswordToken->n >= (TSDB_USET_PASSWORD_LEN - 2)) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG);
pCxt->valid = false;
return false;
}
} else {
strncpy(pPassword, pPasswordToken->z, pPasswordToken->n);
strdequote(pPassword);
if (strtrim(pPassword) <= 0) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_PASSWD_EMPTY);
pCxt->valid = false;
}
}
return pCxt->valid;
}
static bool checkAndSplitEndpoint(SAstCreateContext* pCxt, const SToken* pEp, char* pFqdn, int32_t* pPort) {
if (NULL == pEp) {
return false;
}
if (pEp->n >= TSDB_FQDN_LEN + 2 + 6) { // format 'fqdn:port'
pCxt->valid = false;
} else if (pEp->n >= TSDB_FQDN_LEN + 2 + 6) { // format 'fqdn:port'
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG);
pCxt->valid = false;
}
} else {
char ep[TSDB_FQDN_LEN + 2 + 6];
strncpy(ep, pEp->z, pEp->n);
strdequote(ep);
@ -375,60 +375,67 @@ static bool checkAndSplitEndpoint(SAstCreateContext* pCxt, const SToken* pEp, ch
if (NULL == pColon) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ENDPOINT);
pCxt->valid = false;
}
} else {
strncpy(pFqdn, ep, pColon - ep);
*pPort = strtol(pColon + 1, NULL, 10);
if (*pPort >= UINT16_MAX || *pPort <= 0) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PORT);
pCxt->valid = false;
}
}
}
return pCxt->valid;
}
static bool checkFqdn(SAstCreateContext* pCxt, const SToken* pFqdn) {
if (NULL == pFqdn) {
return false;
}
pCxt->valid = false;
} else {
if (pFqdn->n >= TSDB_FQDN_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG);
pCxt->valid = false;
}
}
return pCxt->valid;
}
static bool checkPort(SAstCreateContext* pCxt, const SToken* pPortToken, int32_t* pPort) {
if (NULL == pPortToken) {
return false;
}
pCxt->valid = false;
} else {
*pPort = strtol(pPortToken->z, NULL, 10);
if (*pPort >= UINT16_MAX || *pPort <= 0) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PORT);
pCxt->valid = false;
}
}
return pCxt->valid;
}
static bool checkDbName(SAstCreateContext* pCxt, const SToken* pDbName) {
static bool checkDbName(SAstCreateContext* pCxt, const SToken* pDbName, bool query) {
if (NULL == pDbName) {
return true;
}
pCxt->valid = (query ? NULL != pCxt->pQueryCxt->db : true);
} else {
pCxt->valid = pDbName->n < TSDB_DB_NAME_LEN ? true : false;
}
return pCxt->valid;
}
static bool checkTableName(SAstCreateContext* pCxt, const SToken* pTableName) {
if (NULL == pTableName) {
return true;
}
pCxt->valid = true;
} else {
pCxt->valid = pTableName->n < TSDB_TABLE_NAME_LEN ? true : false;
}
return pCxt->valid;
}
static bool checkColumnName(SAstCreateContext* pCxt, const SToken* pColumnName) {
if (NULL == pColumnName) {
return true;
}
pCxt->valid = true;
} else {
pCxt->valid = pColumnName->n < TSDB_COL_NAME_LEN ? true : false;
}
return pCxt->valid;
}
@ -508,9 +515,12 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
CHECK_OUT_OF_MEM(val);
val->literal = strndup(pLiteral->z, pLiteral->n);
if (TK_NK_ID != pLiteral->type && (IS_VAR_DATA_TYPE(dataType) || TSDB_DATA_TYPE_TIMESTAMP == dataType)) {
trimString(pLiteral->z, pLiteral->n, val->literal, pLiteral->n);
}
CHECK_OUT_OF_MEM(val->literal);
val->node.resType.type = dataType;
val->node.resType.bytes = tDataTypes[dataType].bytes;
val->node.resType.bytes = IS_VAR_DATA_TYPE(dataType) ? strlen(val->literal) : tDataTypes[dataType].bytes;
if (TSDB_DATA_TYPE_TIMESTAMP == dataType) {
val->node.resType.precision = TSDB_TIME_PRECISION_MILLI;
}
@ -532,6 +542,23 @@ SNode* createDurationValueNode(SAstCreateContext* pCxt, const SToken* pLiteral)
return (SNode*)val;
}
SNode* createDefaultDatabaseCondValue(SAstCreateContext* pCxt) {
if (NULL == pCxt->pQueryCxt->db) {
return NULL;
}
SValueNode* val = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
CHECK_OUT_OF_MEM(val);
val->literal = strdup(pCxt->pQueryCxt->db);
CHECK_OUT_OF_MEM(val->literal);
val->isDuration = false;
val->translate = false;
val->node.resType.type = TSDB_DATA_TYPE_BINARY;
val->node.resType.bytes = strlen(val->literal);
val->node.resType.precision = TSDB_TIME_PRECISION_MILLI;
return (SNode*)val;
}
SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType type, SNode* pParam1, SNode* pParam2) {
SLogicConditionNode* cond = (SLogicConditionNode*)nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
CHECK_OUT_OF_MEM(cond);
@ -577,7 +604,7 @@ SNode* createNodeListNode(SAstCreateContext* pCxt, SNodeList* pList) {
}
SNode* createRealTableNode(SAstCreateContext* pCxt, const SToken* pDbName, const SToken* pTableName, const SToken* pTableAlias) {
if (!checkDbName(pCxt, pDbName) || !checkTableName(pCxt, pTableName)) {
if (!checkDbName(pCxt, pDbName, true) || !checkTableName(pCxt, pTableName)) {
return NULL;
}
SRealTableNode* realTable = (SRealTableNode*)nodesMakeNode(QUERY_NODE_REAL_TABLE);
@ -593,6 +620,7 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, const SToken* pDbName, const
strncpy(realTable->table.tableAlias, pTableName->z, pTableName->n);
}
strncpy(realTable->table.tableName, pTableName->z, pTableName->n);
strcpy(realTable->useDbName, pCxt->pQueryCxt->db);
return (SNode*)realTable;
}
@ -812,7 +840,7 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
}
SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pDbName, SNode* pOptions) {
if (!checkDbName(pCxt, pDbName)) {
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
SCreateDatabaseStmt* pStmt = (SCreateDatabaseStmt*)nodesMakeNode(QUERY_NODE_CREATE_DATABASE_STMT);
@ -824,7 +852,7 @@ SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, cons
}
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pDbName) {
if (!checkDbName(pCxt, pDbName)) {
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
SDropDatabaseStmt* pStmt = (SDropDatabaseStmt*)nodesMakeNode(QUERY_NODE_DROP_DATABASE_STMT);
@ -835,7 +863,7 @@ SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con
}
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName, SNode* pOptions) {
if (!checkDbName(pCxt, pDbName)) {
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
}
SAlterDatabaseStmt* pStmt = nodesMakeNode(QUERY_NODE_ALTER_DATABASE_STMT);
@ -1010,17 +1038,20 @@ SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, const SToken* pDbName) {
return (SNode*)pStmt;
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDbName) {
if (!checkDbName(pCxt, pDbName)) {
static bool needDbShowStmt(ENodeType type) {
return QUERY_NODE_SHOW_TABLES_STMT == type || QUERY_NODE_SHOW_STABLES_STMT == type || QUERY_NODE_SHOW_VGROUPS_STMT == type;
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbNamePattern) {
if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
pCxt->valid = false;
return NULL;
}
SShowStmt* pStmt = nodesMakeNode(type);;
CHECK_OUT_OF_MEM(pStmt);
if (NULL != pDbName) {
strncpy(pStmt->dbName, pDbName->z, pDbName->n);
} else if (NULL != pCxt->pQueryCxt->db) {
strcpy(pStmt->dbName, pCxt->pQueryCxt->db);
}
pStmt->pDbName = pDbName;
pStmt->pTbNamePattern = pTbNamePattern;
return (SNode*)pStmt;
}

View File

@ -57,6 +57,8 @@ typedef struct SInsertParseContext {
SParseContext* pComCxt; // input
char *pSql; // input
SMsgBuf msg; // input
char dbFName[TSDB_DB_FNAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
STableMeta* pTableMeta; // each table
SParsedDataColInfo tags; // each table
SKVRowBuilder tagsBuilder; // each table
@ -235,6 +237,9 @@ static int32_t getTableMeta(SInsertParseContext* pCxt, SToken* pTname) {
CHECK_CODE(catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, &name, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
pCxt->pTableMeta->vgId = vg.vgId; // todo remove
strcpy(pCxt->tableName, name.tname);
tNameGetFullDbName(&name, pCxt->dbFName);
return TSDB_CODE_SUCCESS;
}
@ -248,7 +253,7 @@ static int32_t findCol(SToken* pColname, int32_t start, int32_t end, SSchema* pS
return -1;
}
static void buildMsgHeader(SVgDataBlocks* blocks) {
static void buildMsgHeader(STableDataBlocks* src, SVgDataBlocks* blocks) {
SSubmitReq* submit = (SSubmitReq*)blocks->pData;
submit->header.vgId = htonl(blocks->vg.vgId);
submit->header.contLen = htonl(blocks->size);
@ -285,7 +290,7 @@ static int32_t buildOutput(SInsertParseContext* pCxt) {
dst->numOfTables = src->numOfTables;
dst->size = src->size;
TSWAP(dst->pData, src->pData, char*);
buildMsgHeader(dst);
buildMsgHeader(src, dst);
taosArrayPush(pCxt->pOutput->pDataBlocks, &dst);
}
return TSDB_CODE_SUCCESS;
@ -902,7 +907,7 @@ static int32_t parseValuesClause(SInsertParseContext* pCxt, STableDataBlocks* da
CHECK_CODE(parseValues(pCxt, dataBuf, maxNumOfRows, &numOfRows));
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
if (TSDB_CODE_SUCCESS != setBlockInfo(pBlocks, dataBuf->pTableMeta, numOfRows)) {
if (TSDB_CODE_SUCCESS != setBlockInfo(pBlocks, dataBuf, numOfRows)) {
return buildInvalidOperationMsg(&pCxt->msg, "too many rows in sql, total number of rows should be less than 32767");
}
@ -979,6 +984,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
STableDataBlocks *dataBuf = NULL;
CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, pCxt->pTableMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL));
strcpy(dataBuf->tableName, pCxt->tableName);
strcpy(dataBuf->dbFName, pCxt->dbFName);
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
@ -1033,7 +1040,6 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
};
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pOutput) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -1052,6 +1058,5 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
code = parseInsertBody(&context);
}
destroyInsertParseContext(&context);
terrno = code;
return code;
}

View File

@ -61,12 +61,14 @@ static SKeyword keywordTable[] = {
{"FROM", TK_FROM},
{"FSYNC", TK_FSYNC},
{"FUNCTION", TK_FUNCTION},
{"FUNCTIONS", TK_FUNCTIONS},
{"GROUP", TK_GROUP},
{"HAVING", TK_HAVING},
{"IF", TK_IF},
{"IMPORT", TK_IMPORT},
{"IN", TK_IN},
{"INDEX", TK_INDEX},
{"INDEXES", TK_INDEXES},
{"INNER", TK_INNER},
{"INT", TK_INT},
{"INSERT", TK_INSERT},
@ -85,6 +87,7 @@ static SKeyword keywordTable[] = {
{"MINROWS", TK_MINROWS},
{"MINUS", TK_MINUS},
{"MNODES", TK_MNODES},
{"MODULES", TK_MODULES},
{"NCHAR", TK_NCHAR},
{"NMATCH", TK_NMATCH},
{"NONE", TK_NONE},
@ -116,6 +119,7 @@ static SKeyword keywordTable[] = {
{"STABLE", TK_STABLE},
{"STABLES", TK_STABLES},
{"STATE_WINDOW", TK_STATE_WINDOW},
{"STREAMS", TK_STREAMS},
{"STREAM_MODE", TK_STREAM_MODE},
{"TABLE", TK_TABLE},
{"TABLES", TK_TABLES},
@ -161,10 +165,8 @@ static SKeyword keywordTable[] = {
// {"UPLUS", TK_UPLUS},
// {"BITNOT", TK_BITNOT},
// {"ACCOUNTS", TK_ACCOUNTS},
// {"MODULES", TK_MODULES},
// {"QUERIES", TK_QUERIES},
// {"CONNECTIONS", TK_CONNECTIONS},
// {"STREAMS", TK_STREAMS},
// {"VARIABLES", TK_VARIABLES},
// {"SCORES", TK_SCORES},
// {"GRANTS", TK_GRANTS},
@ -234,7 +236,6 @@ static SKeyword keywordTable[] = {
// {"TOPICS", TK_TOPICS},
// {"COMPACT", TK_COMPACT},
// {"MODIFY", TK_MODIFY},
// {"FUNCTIONS", TK_FUNCTIONS},
// {"OUTPUTTYPE", TK_OUTPUTTYPE},
// {"AGGREGATE", TK_AGGREGATE},
// {"BUFSIZE", TK_BUFSIZE},

View File

@ -30,8 +30,14 @@ typedef struct STranslateContext {
ESqlClause currClause;
SSelectStmt* pCurrStmt;
SCmdMsgInfo* pCmdMsg;
SHashObj* pDbs;
SHashObj* pTables;
} STranslateContext;
typedef struct SFullDatabaseName {
char fullDbName[TSDB_DB_FNAME_LEN];
} SFullDatabaseName;
static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
@ -70,14 +76,110 @@ static int32_t addNamespace(STranslateContext* pCxt, void* pTable) {
return TSDB_CODE_SUCCESS;
}
static SName* toName(int32_t acctId, const SRealTableNode* pRealTable, SName* pName) {
static SName* toName(int32_t acctId, const char* pDbName, const char* pTableName, SName* pName) {
pName->type = TSDB_TABLE_NAME_T;
pName->acctId = acctId;
strcpy(pName->dbname, pRealTable->table.dbName);
strcpy(pName->tname, pRealTable->table.tableName);
strcpy(pName->dbname, pDbName);
strcpy(pName->tname, pTableName);
return pName;
}
static int32_t collectUseDatabase(const char* pFullDbName, SHashObj* pDbs) {
SFullDatabaseName name = {0};
strcpy(name.fullDbName, pFullDbName);
return taosHashPut(pDbs, pFullDbName, strlen(pFullDbName), &name, sizeof(SFullDatabaseName));
}
static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
return taosHashPut(pDbs, fullName, strlen(fullName), pName, sizeof(SName));
}
static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseTable(pName, pCxt->pTables);
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
static int32_t getTableMeta(STranslateContext* pCxt, const char* pDbName, const char* pTableName, STableMeta** pMeta) {
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
strcpy(name.dbname, pDbName);
strcpy(name.tname, pTableName);
return getTableMetaImpl(pCxt, &name, pMeta);
}
static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseTable(pName, pCxt->pTables);
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetTableDistVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pVgInfo);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableDistVgInfo error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
char fullDbName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pName, fullDbName);
int32_t code = collectUseDatabase(fullDbName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName);
}
return code;
}
static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray** pVgInfo) {
SName name;
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
return getDBVgInfoImpl(pCxt, &name, pVgInfo);
}
static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pName, SVgroupInfo* pInfo) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseTable(pName, pCxt->pTables);
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
static int32_t getTableHashVgroup(STranslateContext* pCxt, const char* pDbName, const char* pTableName, SVgroupInfo* pInfo) {
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
strcpy(name.dbname, pDbName);
strcpy(name.tname, pTableName);
return getTableHashVgroupImpl(pCxt, &name, pInfo);
}
static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int32_t* pVersion, int64_t* pDbId, int32_t* pTableNum) {
SParseContext* pParCxt = pCxt->pParseCxt;
int32_t code = collectUseDatabase(pDbFName, pCxt->pDbs);
if (TSDB_CODE_SUCCESS == code) {
code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName);
}
return code;
}
static bool belongTable(const char* currentDb, const SColumnNode* pCol, const STableNode* pTable) {
int cmp = 0;
if ('\0' != pCol->dbName[0]) {
@ -244,15 +346,6 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode* pCol) {
return found ? DEAL_RES_CONTINUE : translateColumnWithoutPrefix(pCxt, pCol);
}
static int32_t trimStringWithVarFormat(const char* src, int32_t len, bool format, char* dst) {
char* dstVal = dst;
if (format) {
varDataSetLen(dst, len);
dstVal = varDataVal(dst);
}
return trimString(src, len, dstVal, len);
}
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) {
if (pVal->isDuration) {
if (parseAbsoluteDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, pVal->node.resType.precision) != TSDB_CODE_SUCCESS) {
@ -290,26 +383,18 @@ static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
int32_t n = strlen(pVal->literal);
pVal->datum.p = calloc(1, n + VARSTR_HEADER_SIZE);
pVal->datum.p = calloc(1, pVal->node.resType.bytes + VARSTR_HEADER_SIZE);
if (NULL == pVal->datum.p) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
}
trimStringWithVarFormat(pVal->literal, n, true, pVal->datum.p);
varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
strcpy(varDataVal(pVal->datum.p), pVal->literal);
break;
}
case TSDB_DATA_TYPE_TIMESTAMP: {
int32_t n = strlen(pVal->literal);
char* tmp = calloc(1, n);
if (NULL == tmp) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
}
int32_t len = trimStringWithVarFormat(pVal->literal, n, false, tmp);
if (taosParseTime(tmp, &pVal->datum.i, len, pVal->node.resType.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
tfree(tmp);
if (taosParseTime(pVal->literal, &pVal->datum.i, pVal->node.resType.bytes, pVal->node.resType.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
tfree(tmp);
break;
}
case TSDB_DATA_TYPE_JSON:
@ -498,43 +583,72 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
return TSDB_CODE_SUCCESS;
}
static int32_t setTableVgroupList(SParseContext* pCxt, SName* name, SRealTableNode* pRealTable) {
if (pCxt->topicQuery) {
static int32_t toVgroupsInfo(SArray* pVgs, SVgroupsInfo** pVgsInfo) {
size_t vgroupNum = taosArrayGetSize(pVgs);
*pVgsInfo = calloc(1, sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupNum);
if (NULL == *pVgsInfo) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pVgsInfo)->numOfVgroups = vgroupNum;
for (int32_t i = 0; i < vgroupNum; ++i) {
SVgroupInfo *vg = taosArrayGet(pVgs, i);
(*pVgsInfo)->vgroups[i] = *vg;
}
return TSDB_CODE_SUCCESS;
}
if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
static int32_t setSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
// todo release
// if (0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES)) {
// return TSDB_CODE_SUCCESS;
// }
int32_t code = TSDB_CODE_SUCCESS;
SArray* vgroupList = NULL;
int32_t code = catalogGetTableDistVgInfo(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, name, &vgroupList);
if (code != TSDB_CODE_SUCCESS) {
if ('\0' != pRealTable->useDbName[0]) {
code = getDBVgInfo(pCxt, pRealTable->useDbName, &vgroupList);
} else {
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
if (TSDB_CODE_SUCCESS == code) {
// todo remove
if (NULL != vgroupList && taosArrayGetSize(vgroupList) > 0 && 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES)) {
taosArrayPopTailBatch(vgroupList, taosArrayGetSize(vgroupList) - 1);
}
code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList);
}
taosArrayDestroy(vgroupList);
return code;
}
size_t vgroupNum = taosArrayGetSize(vgroupList);
pRealTable->pVgroupList = calloc(1, sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupNum);
if (NULL == pRealTable->pVgroupList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pRealTable->pVgroupList->numOfVgroups = vgroupNum;
for (int32_t i = 0; i < vgroupNum; ++i) {
SVgroupInfo *vg = taosArrayGet(vgroupList, i);
pRealTable->pVgroupList->vgroups[i] = *vg;
static int32_t setTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
if (pCxt->pParseCxt->topicQuery) {
return TSDB_CODE_SUCCESS;
}
int32_t code = TSDB_CODE_SUCCESS;
if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
SArray* vgroupList = NULL;
code = getTableDistVgInfo(pCxt, pName, &vgroupList);
if (TSDB_CODE_SUCCESS == code) {
code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList);
}
taosArrayDestroy(vgroupList);
} else if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
code = setSysTableVgroupList(pCxt, pName, pRealTable);
} else {
pRealTable->pVgroupList = calloc(1, sizeof(SVgroupsInfo) + sizeof(SVgroupInfo));
if (NULL == pRealTable->pVgroupList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pRealTable->pVgroupList->numOfVgroups = 1;
int32_t code = catalogGetTableHashVgroup(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, name, pRealTable->pVgroupList->vgroups);
if (code != TSDB_CODE_SUCCESS) {
code = getTableHashVgroupImpl(pCxt, pName, pRealTable->pVgroupList->vgroups);
}
return code;
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
int32_t code = TSDB_CODE_SUCCESS;
@ -542,12 +656,12 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
case QUERY_NODE_REAL_TABLE: {
SRealTableNode* pRealTable = (SRealTableNode*)pTable;
SName name;
code = catalogGetTableMeta(pCxt->pParseCxt->pCatalog, pCxt->pParseCxt->pTransporter, &(pCxt->pParseCxt->mgmtEpSet),
toName(pCxt->pParseCxt->acctId, pRealTable, &name), &(pRealTable->pMeta));
code = getTableMetaImpl(pCxt,
toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, pRealTable->table.tableName, &name), &(pRealTable->pMeta));
if (TSDB_CODE_SUCCESS != code) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TABLE_NOT_EXIST, pRealTable->table.tableName);
}
code = setTableVgroupList(pCxt->pParseCxt, &name, pRealTable);
code = setTableVgroupList(pCxt, &name, pRealTable);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
@ -951,11 +1065,10 @@ static int32_t doTranslateDropSuperTable(STranslateContext* pCxt, const SName* p
static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt) {
SDropTableClause* pClause = nodesListGetNode(pStmt->pTables, 0);
SName tableName = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
strcpy(tableName.dbname, pClause->dbName);
strcpy(tableName.tname, pClause->tableName);
STableMeta* pTableMeta = NULL;
int32_t code = catalogGetTableMeta(pCxt->pParseCxt->pCatalog, pCxt->pParseCxt->pTransporter, &(pCxt->pParseCxt->mgmtEpSet), &tableName, &pTableMeta);
SName tableName;
int32_t code = getTableMetaImpl(
pCxt, toName(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, &tableName), &pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
code = doTranslateDropSuperTable(pCxt, &tableName, pClause->ignoreNotExists);
@ -963,8 +1076,8 @@ static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt
// todo : drop normal table or child table
code = TSDB_CODE_FAILED;
}
}
tfree(pTableMeta);
}
return code;
}
@ -1044,13 +1157,14 @@ static int32_t translateAlterTable(STranslateContext* pCxt, SAlterTableStmt* pSt
}
static int32_t translateUseDatabase(STranslateContext* pCxt, SUseDatabaseStmt* pStmt) {
SUseDbReq usedbReq = {0};
SName name = {0};
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
SUseDbReq usedbReq = {0};
tNameExtractFullName(&name, usedbReq.db);
catalogGetDBVgVersion(pCxt->pParseCxt->pCatalog, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable);
int32_t code = getDBVgVersion(pCxt, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
if (NULL == pCxt->pCmdMsg) {
@ -1226,12 +1340,12 @@ static int32_t nodeTypeToShowType(ENodeType nt) {
static int32_t translateShow(STranslateContext* pCxt, SShowStmt* pStmt) {
SShowReq showReq = { .type = nodeTypeToShowType(nodeType(pStmt)) };
if ('\0' != pStmt->dbName[0]) {
SName name = {0};
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, showReq.db);
}
// if ('\0' != pStmt->dbName[0]) {
// SName name = {0};
// tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
// char dbFname[TSDB_DB_FNAME_LEN] = {0};
// tNameGetFullDbName(&name, showReq.db);
// }
pCxt->pCmdMsg = malloc(sizeof(SCmdMsgInfo));
if (NULL == pCxt->pCmdMsg) {
@ -1250,19 +1364,11 @@ static int32_t translateShow(STranslateContext* pCxt, SShowStmt* pStmt) {
}
static int32_t translateShowTables(STranslateContext* pCxt) {
SName name = {0};
SVShowTablesReq* pShowReq = calloc(1, sizeof(SVShowTablesReq));
if (pCxt->pParseCxt->db == NULL || strlen(pCxt->pParseCxt->db) == 0) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_INVALID_OPERATION, "db not specified");
}
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, strlen(pCxt->pParseCxt->db));
char dbFname[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(&name, dbFname);
SArray* array = NULL;
int32_t code = catalogGetDBVgInfo(pCxt->pParseCxt->pCatalog, pCxt->pParseCxt->pTransporter, &pCxt->pParseCxt->mgmtEpSet, dbFname, false, &array);
if (code != TSDB_CODE_SUCCESS) {
int32_t code = getDBVgInfo(pCxt, pCxt->pParseCxt->db, &array);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
SVgroupInfo* info = taosArrayGet(array, 0);
@ -1605,11 +1711,155 @@ static void destroyTranslateContext(STranslateContext* pCxt) {
tfree(pCxt->pCmdMsg->pMsg);
tfree(pCxt->pCmdMsg);
}
taosHashCleanup(pCxt->pDbs);
taosHashCleanup(pCxt->pTables);
}
static const char* getSysTableName(ENodeType type) {
switch (type) {
case QUERY_NODE_SHOW_DATABASES_STMT:
return TSDB_INS_TABLE_USER_DATABASES;
case QUERY_NODE_SHOW_TABLES_STMT:
return TSDB_INS_TABLE_USER_TABLES;
case QUERY_NODE_SHOW_STABLES_STMT:
return TSDB_INS_TABLE_USER_STABLES;
case QUERY_NODE_SHOW_USERS_STMT:
return TSDB_INS_TABLE_USER_USERS;
case QUERY_NODE_SHOW_DNODES_STMT:
return TSDB_INS_TABLE_DNODES;
case QUERY_NODE_SHOW_VGROUPS_STMT:
return TSDB_INS_TABLE_VGROUPS;
case QUERY_NODE_SHOW_MNODES_STMT:
return TSDB_INS_TABLE_MNODES;
case QUERY_NODE_SHOW_MODULES_STMT:
return TSDB_INS_TABLE_MODULES;
case QUERY_NODE_SHOW_QNODES_STMT:
return TSDB_INS_TABLE_QNODES;
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
return TSDB_INS_TABLE_USER_FUNCTIONS;
case QUERY_NODE_SHOW_INDEXES_STMT:
return TSDB_INS_TABLE_USER_INDEXES;
case QUERY_NODE_SHOW_STREAMS_STMT:
return TSDB_INS_TABLE_USER_STREAMS;
default:
break;
}
return NULL;
}
static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) {
SSelectStmt* pSelect = nodesMakeNode(QUERY_NODE_SELECT_STMT);
if (NULL == pSelect) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SRealTableNode* pTable = nodesMakeNode(QUERY_NODE_REAL_TABLE);
if (NULL == pTable) {
nodesDestroyNode(pSelect);
return TSDB_CODE_OUT_OF_MEMORY;
}
strcpy(pTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB);
strcpy(pTable->table.tableName, getSysTableName(showType));
pSelect->pFromTable = (SNode*)pTable;
*pStmt = pSelect;
return TSDB_CODE_SUCCESS;
}
static int32_t createOperatorNode(EOperatorType opType, const char* pColName, SNode* pRight, SNode** pOp) {
if (NULL == pRight) {
return TSDB_CODE_SUCCESS;
}
SOperatorNode* pOper = nodesMakeNode(QUERY_NODE_OPERATOR);
if (NULL == pOper) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pOper->opType = opType;
pOper->pLeft = nodesMakeNode(QUERY_NODE_COLUMN);
pOper->pRight = nodesCloneNode(pRight);
if (NULL == pOper->pLeft || NULL == pOper->pRight) {
nodesDestroyNode(pOper);
return TSDB_CODE_OUT_OF_MEMORY;
}
strcpy(((SColumnNode*)pOper->pLeft)->colName, pColName);
*pOp = (SNode*)pOper;
return TSDB_CODE_SUCCESS;
}
static const char* getTbNameColName(ENodeType type) {
return (QUERY_NODE_SHOW_STABLES_STMT == type ? "stable_name" : "table_name");
}
static int32_t createLogicCondNode(SNode* pCond1, SNode* pCond2, SNode** pCond) {
SLogicConditionNode* pCondition = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
if (NULL == pCondition) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pCondition->condType = LOGIC_COND_TYPE_AND;
pCondition->pParameterList = nodesMakeList();
if (NULL == pCondition->pParameterList) {
nodesDestroyNode(pCondition);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (TSDB_CODE_SUCCESS != nodesListAppend(pCondition->pParameterList, pCond1) ||
TSDB_CODE_SUCCESS != nodesListAppend(pCondition->pParameterList, pCond2)) {
nodesDestroyNode(pCondition);
return TSDB_CODE_OUT_OF_MEMORY;
}
*pCond = (SNode*)pCondition;
return TSDB_CODE_SUCCESS;
}
static int32_t createShowCondition(const SShowStmt* pShow, SSelectStmt* pSelect) {
SNode* pDbCond = NULL;
SNode* pTbCond = NULL;
if (TSDB_CODE_SUCCESS != createOperatorNode(OP_TYPE_EQUAL, "db_name", pShow->pDbName, &pDbCond) ||
TSDB_CODE_SUCCESS != createOperatorNode(OP_TYPE_LIKE, getTbNameColName(nodeType(pShow)), pShow->pTbNamePattern, &pTbCond)) {
nodesDestroyNode(pDbCond);
nodesDestroyNode(pTbCond);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (NULL != pDbCond && NULL != pTbCond) {
if (TSDB_CODE_SUCCESS != createLogicCondNode(pDbCond, pTbCond, &pSelect->pWhere)) {
nodesDestroyNode(pDbCond);
nodesDestroyNode(pTbCond);
return TSDB_CODE_OUT_OF_MEMORY;
}
} else {
pSelect->pWhere = (NULL == pDbCond ? pTbCond : pDbCond);
}
if (NULL != pShow->pDbName) {
strcpy(((SRealTableNode*)pSelect->pFromTable)->useDbName, ((SValueNode*)pShow->pDbName)->literal);
}
return TSDB_CODE_SUCCESS;
}
static int32_t rewriteShow(STranslateContext* pCxt, SQuery* pQuery) {
SSelectStmt* pStmt = NULL;
int32_t code = createSelectStmtForShow(nodeType(pQuery->pRoot), &pStmt);
if (TSDB_CODE_SUCCESS == code) {
code = createShowCondition((SShowStmt*)pQuery->pRoot, pStmt);
}
if (TSDB_CODE_SUCCESS == code) {
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = (SNode*)pStmt;
}
return code;
}
typedef struct SVgroupTablesBatch {
SVCreateTbBatchReq req;
SVgroupInfo info;
char dbName[TSDB_DB_NAME_LEN];
} SVgroupTablesBatch;
static void toSchema(const SColumnDefNode* pCol, int32_t colId, SSchema* pSchema) {
@ -1625,7 +1875,7 @@ static void destroyCreateTbReq(SVCreateTbReq* pReq) {
}
static int32_t buildNormalTableBatchReq(
const char* pTableName, const SNodeList* pColumns, const SVgroupInfo* pVgroupInfo, SVgroupTablesBatch* pBatch) {
const char* pDbName, const char* pTableName, const SNodeList* pColumns, const SVgroupInfo* pVgroupInfo, SVgroupTablesBatch* pBatch) {
SVCreateTbReq req = {0};
req.type = TD_NORMAL_TABLE;
req.name = strdup(pTableName);
@ -1643,6 +1893,7 @@ static int32_t buildNormalTableBatchReq(
}
pBatch->info = *pVgroupInfo;
strcpy(pBatch->dbName, pDbName);
pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
if (NULL == pBatch->req.pArray) {
destroyCreateTbReq(&req);
@ -1653,7 +1904,7 @@ static int32_t buildNormalTableBatchReq(
return TSDB_CODE_SUCCESS;
}
static int32_t serializeVgroupTablesBatch(SVgroupTablesBatch* pTbBatch, SArray* pBufArray) {
static int32_t serializeVgroupTablesBatch(int32_t acctId, SVgroupTablesBatch* pTbBatch, SArray* pBufArray) {
int tlen = sizeof(SMsgHead) + tSerializeSVCreateTbBatchReq(NULL, &(pTbBatch->req));
void* buf = malloc(tlen);
if (NULL == buf) {
@ -1693,13 +1944,6 @@ static void destroyCreateTbReqBatch(SVgroupTablesBatch* pTbBatch) {
taosArrayDestroy(pTbBatch->req.pArray);
}
static int32_t getTableHashVgroup(SParseContext* pCxt, const char* pDbName, const char* pTableName, SVgroupInfo* pInfo) {
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->acctId };
strcpy(name.dbname, pDbName);
strcpy(name.tname, pTableName);
return catalogGetTableHashVgroup(pCxt->pCatalog, pCxt->pTransporter, &pCxt->mgmtEpSet, &name, pInfo);
}
static int32_t rewriteToVnodeModifOpStmt(SQuery* pQuery, SArray* pBufArray) {
SVnodeModifOpStmt* pNewStmt = nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (pNewStmt == NULL) {
@ -1722,16 +1966,16 @@ static void destroyCreateTbReqArray(SArray* pArray) {
taosArrayDestroy(pArray);
}
static int32_t buildCreateTableDataBlock(const SCreateTableStmt* pStmt, const SVgroupInfo* pInfo, SArray** pBufArray) {
static int32_t buildCreateTableDataBlock(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pInfo, SArray** pBufArray) {
*pBufArray = taosArrayInit(1, POINTER_BYTES);
if (NULL == *pBufArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SVgroupTablesBatch tbatch = {0};
int32_t code = buildNormalTableBatchReq(pStmt->tableName, pStmt->pCols, pInfo, &tbatch);
int32_t code = buildNormalTableBatchReq(pStmt->dbName, pStmt->tableName, pStmt->pCols, pInfo, &tbatch);
if (TSDB_CODE_SUCCESS == code) {
code = serializeVgroupTablesBatch(&tbatch, *pBufArray);
code = serializeVgroupTablesBatch(acctId, &tbatch, *pBufArray);
}
destroyCreateTbReqBatch(&tbatch);
@ -1745,10 +1989,10 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
SCreateTableStmt* pStmt = (SCreateTableStmt*)pQuery->pRoot;
SVgroupInfo info = {0};
int32_t code = getTableHashVgroup(pCxt->pParseCxt, pStmt->dbName, pStmt->tableName, &info);
int32_t code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
SArray* pBufArray = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = buildCreateTableDataBlock(pStmt, &info, &pBufArray);
code = buildCreateTableDataBlock(pCxt->pParseCxt->acctId, pStmt, &info, &pBufArray);
}
if (TSDB_CODE_SUCCESS == code) {
code = rewriteToVnodeModifOpStmt(pQuery, pBufArray);
@ -1760,7 +2004,7 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
return code;
}
static void addCreateTbReqIntoVgroup(SHashObj* pVgroupHashmap, const char* pTableName, SKVRow row, uint64_t suid, SVgroupInfo* pVgInfo) {
static void addCreateTbReqIntoVgroup(SHashObj* pVgroupHashmap, const char* pDbName, const char* pTableName, SKVRow row, uint64_t suid, SVgroupInfo* pVgInfo) {
struct SVCreateTbReq req = {0};
req.type = TD_CHILD_TABLE;
req.name = strdup(pTableName);
@ -1771,6 +2015,7 @@ static void addCreateTbReqIntoVgroup(SHashObj* pVgroupHashmap, const char* pTabl
if (pTableBatch == NULL) {
SVgroupTablesBatch tBatch = {0};
tBatch.info = *pVgInfo;
strcpy(tBatch.dbName, pDbName);
tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
taosArrayPush(tBatch.req.pArray, &req);
@ -1883,11 +2128,8 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau
}
static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableClause* pStmt, SHashObj* pVgroupHashmap) {
SName name = { .type = TSDB_TABLE_NAME_T, .acctId = pCxt->pParseCxt->acctId };
strcpy(name.dbname, pStmt->useDbName);
strcpy(name.tname, pStmt->useTableName);
STableMeta* pSuperTableMeta = NULL;
int32_t code = catalogGetTableMeta(pCxt->pParseCxt->pCatalog, pCxt->pParseCxt->pTransporter, &pCxt->pParseCxt->mgmtEpSet, &name, &pSuperTableMeta);
int32_t code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
SKVRowBuilder kvRowBuilder = {0};
if (TSDB_CODE_SUCCESS == code) {
@ -1914,10 +2156,10 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
SVgroupInfo info = {0};
if (TSDB_CODE_SUCCESS == code) {
code = getTableHashVgroup(pCxt->pParseCxt, pStmt->dbName, pStmt->tableName, &info);
code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
addCreateTbReqIntoVgroup(pVgroupHashmap, pStmt->tableName, row, pSuperTableMeta->uid, &info);
addCreateTbReqIntoVgroup(pVgroupHashmap, pStmt->dbName, pStmt->tableName, row, pSuperTableMeta->uid, &info);
}
tfree(pSuperTableMeta);
@ -1925,7 +2167,7 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
return code;
}
static SArray* serializeVgroupsTablesBatch(SHashObj* pVgroupHashmap) {
static SArray* serializeVgroupsTablesBatch(int32_t acctId, SHashObj* pVgroupHashmap) {
SArray* pBufArray = taosArrayInit(taosHashGetSize(pVgroupHashmap), sizeof(void*));
if (NULL == pBufArray) {
return NULL;
@ -1939,7 +2181,7 @@ static SArray* serializeVgroupsTablesBatch(SHashObj* pVgroupHashmap) {
break;
}
serializeVgroupTablesBatch(pTbBatch, pBufArray);
serializeVgroupTablesBatch(acctId, pTbBatch, pBufArray);
destroyCreateTbReqBatch(pTbBatch);
} while (true);
@ -1964,7 +2206,7 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery)
}
}
SArray* pBufArray = serializeVgroupsTablesBatch(pVgroupHashmap);
SArray* pBufArray = serializeVgroupsTablesBatch(pCxt->pParseCxt->acctId, pVgroupHashmap);
taosHashCleanup(pVgroupHashmap);
if (NULL == pBufArray) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1981,6 +2223,20 @@ static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
int32_t code = TSDB_CODE_SUCCESS;
switch (nodeType(pQuery->pRoot)) {
case QUERY_NODE_SHOW_DATABASES_STMT:
case QUERY_NODE_SHOW_TABLES_STMT:
case QUERY_NODE_SHOW_STABLES_STMT:
case QUERY_NODE_SHOW_USERS_STMT:
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_VGROUPS_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:
case QUERY_NODE_SHOW_QNODES_STMT:
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
case QUERY_NODE_SHOW_INDEXES_STMT:
case QUERY_NODE_SHOW_STREAMS_STMT:
code = rewriteShow(pCxt, pQuery);
break;
case QUERY_NODE_CREATE_TABLE_STMT:
if (NULL == ((SCreateTableStmt*)pQuery->pRoot)->pTags) {
code = rewriteCreateTable(pCxt, pQuery);
@ -2022,6 +2278,31 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
pQuery->msgType = pQuery->pCmdMsg->msgType;
break;
}
if (NULL != pCxt->pDbs) {
pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
if (NULL == pQuery->pDbList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SFullDatabaseName* pDb = taosHashIterate(pCxt->pDbs, NULL);
while (NULL != pDb) {
taosArrayPush(pQuery->pDbList, pDb->fullDbName);
pDb = taosHashIterate(pCxt->pDbs, pDb);
}
}
if (NULL != pCxt->pTables) {
pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
if (NULL == pQuery->pTableList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SName* pTable = taosHashIterate(pCxt->pTables, NULL);
while (NULL != pTable) {
taosArrayPush(pQuery->pTableList, pTable);
pTable = taosHashIterate(pCxt->pTables, pTable);
}
}
return code;
}
@ -2032,8 +2313,13 @@ int32_t doTranslate(SParseContext* pParseCxt, SQuery* pQuery) {
.msgBuf = { .buf = pParseCxt->pMsg, .len = pParseCxt->msgLen },
.pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES),
.currLevel = 0,
.currClause = 0
.currClause = 0,
.pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK),
.pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)
};
if (NULL == cxt.pNsLevel) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = fmFuncMgtInit();
if (TSDB_CODE_SUCCESS == code) {
code = rewriteQuery(&cxt, pQuery);

View File

@ -38,11 +38,14 @@ static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
}
int32_t qParseQuerySql(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = TSDB_CODE_SUCCESS;
if (isInsertSql(pCxt->pSql, pCxt->sqlLen)) {
return parseInsertSql(pCxt, pQuery);
code = parseInsertSql(pCxt, pQuery);
} else {
return parseSqlIntoAst(pCxt, pQuery);
code = parseSqlIntoAst(pCxt, pQuery);
}
terrno = code;
return code;
}
void qDestroyQuery(SQuery* pQueryNode) {

File diff suppressed because it is too large Load Diff

View File

@ -26,6 +26,64 @@
#include "mockCatalog.h"
namespace {
void generateInformationSchema(MockCatalogService* mcs) {
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "dnodes", TSDB_SYSTEM_TABLE, 1).addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "mnodes", TSDB_SYSTEM_TABLE, 1).addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "modules", TSDB_SYSTEM_TABLE, 1).addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "qnodes", TSDB_SYSTEM_TABLE, 1).addColumn("id", TSDB_DATA_TYPE_INT);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_databases", TSDB_SYSTEM_TABLE, 1).addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_functions", TSDB_SYSTEM_TABLE, 1).addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_FUNC_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_indexes", TSDB_SYSTEM_TABLE, 2)
.addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN).addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_stables", TSDB_SYSTEM_TABLE, 2)
.addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN).addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_streams", TSDB_SYSTEM_TABLE, 1).addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_tables", TSDB_SYSTEM_TABLE, 2)
.addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN).addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_table_distributed", TSDB_SYSTEM_TABLE, 1).addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "user_users", TSDB_SYSTEM_TABLE, 1).addColumn("user_name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN);
builder.done();
}
{
ITableBuilder& builder = mcs->createTableBuilder("information_schema", "vgroups", TSDB_SYSTEM_TABLE, 1).addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
builder.done();
}
}
void generateTestT1(MockCatalogService* mcs) {
ITableBuilder& builder = mcs->createTableBuilder("test", "t1", TSDB_NORMAL_TABLE, 6)
.setPrecision(TSDB_TIME_PRECISION_MILLI).setVgid(1).addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP)
@ -66,6 +124,10 @@ int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* ve
return 0;
}
int32_t __catalogGetDBVgInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* dbFName, SArray** vgroupList) {
return 0;
}
void initMetaDataEnv() {
mockCatalogService.reset(new MockCatalogService());
@ -74,6 +136,8 @@ void initMetaDataEnv() {
stub.set(catalogGetTableMeta, __catalogGetTableMeta);
stub.set(catalogGetTableHashVgroup, __catalogGetTableHashVgroup);
stub.set(catalogGetTableDistVgInfo, __catalogGetTableDistVgInfo);
stub.set(catalogGetDBVgVersion, __catalogGetDBVgVersion);
stub.set(catalogGetDBVgInfo, __catalogGetDBVgInfo);
// {
// AddrAny any("libcatalog.so");
// std::map<std::string,void*> result;
@ -117,6 +181,7 @@ void initMetaDataEnv() {
}
void generateMetaData() {
generateInformationSchema(mockCatalogService.get());
generateTestT1(mockCatalogService.get());
generateTestST1(mockCatalogService.get());
mockCatalogService->showTables();

View File

@ -302,6 +302,13 @@ TEST_F(ParserTest, createUser) {
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showUsers) {
setDatabase("root", "test");
bind("show users");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, alterAccount) {
setDatabase("root", "test");
@ -319,6 +326,13 @@ TEST_F(ParserTest, createDnode) {
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showDnodes) {
setDatabase("root", "test");
bind("show dnodes");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, alterDnode) {
setDatabase("root", "test");
@ -433,6 +447,93 @@ TEST_F(ParserTest, createTable) {
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showTables) {
setDatabase("root", "test");
bind("show tables");
ASSERT_TRUE(run());
bind("show test.tables");
ASSERT_TRUE(run());
bind("show tables like 'c%'");
ASSERT_TRUE(run());
bind("show test.tables like 'c%'");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showStables) {
setDatabase("root", "test");
bind("show stables");
ASSERT_TRUE(run());
bind("show test.stables");
ASSERT_TRUE(run());
bind("show stables like 'c%'");
ASSERT_TRUE(run());
bind("show test.stables like 'c%'");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showVgroups) {
setDatabase("root", "test");
bind("show vgroups");
ASSERT_TRUE(run());
bind("show test.vgroups");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showMnodes) {
setDatabase("root", "test");
bind("show mnodes");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showModules) {
setDatabase("root", "test");
bind("show modules");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showQnodes) {
setDatabase("root", "test");
bind("show qnodes");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showFunctions) {
setDatabase("root", "test");
bind("show functions");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showIndexes) {
setDatabase("root", "test");
bind("show indexes from t1");
ASSERT_TRUE(run());
bind("show indexes from t1 from test");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, showStreams) {
setDatabase("root", "test");
bind("show streams");
ASSERT_TRUE(run());
}
TEST_F(ParserTest, createSmaIndex) {
setDatabase("root", "test");

View File

@ -131,7 +131,7 @@ private:
TEST_F(InsertTest, singleTableSingleRowTest) {
setDatabase("root", "test");
bind("insert into t1 values (now, 1, \"beijing\")");
bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 1);
@ -141,7 +141,7 @@ TEST_F(InsertTest, singleTableSingleRowTest) {
TEST_F(InsertTest, singleTableMultiRowTest) {
setDatabase("root", "test");
bind("insert into t1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)(now+2s, 3, 'guangzhou', 9, 10, 11)");
ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
dumpReslut();
checkReslut(1, 3);

View File

@ -123,6 +123,30 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec
return code;
}
static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanCols, STableMeta* pMeta) {
if (pCxt->pPlanCxt->topicQuery || pCxt->pPlanCxt->streamQuery) {
return SCAN_TYPE_STREAM;
}
if (NULL == pScanCols) {
// select count(*) from t
return SCAN_TYPE_TABLE;
}
if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
return SCAN_TYPE_SYSTEM_TABLE;
}
SNode* pCol = NULL;
FOREACH(pCol, pScanCols) {
if (COLUMN_TYPE_COLUMN == ((SColumnNode*)pCol)->colType) {
return SCAN_TYPE_TABLE;
}
}
return SCAN_TYPE_TAG;
}
static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SRealTableNode* pRealTable, SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
if (NULL == pScan) {
@ -131,7 +155,6 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
TSWAP(pScan->pMeta, pRealTable->pMeta, STableMeta*);
TSWAP(pScan->pVgroupList, pRealTable->pVgroupList, SVgroupsInfo*);
pScan->scanType = pCxt->pPlanCxt->topicQuery ? SCAN_TYPE_TOPIC : SCAN_TYPE_TABLE;
pScan->scanFlag = MAIN_SCAN;
pScan->scanRange = TSWINDOW_INITIALIZER;
pScan->tableName.type = TSDB_TABLE_NAME_T;
@ -149,6 +172,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
}
}
pScan->scanType = getScanType(pCxt, pCols, pScan->pMeta);
// set output
if (TSDB_CODE_SUCCESS == code && NULL != pCols) {
pScan->node.pTargets = nodesCloneList(pCols);

View File

@ -199,6 +199,8 @@ static SNodeptr createPrimaryKeyCol(SPhysiPlanContext* pCxt, uint64_t tableId) {
}
static int32_t createScanCols(SPhysiPlanContext* pCxt, SScanPhysiNode* pScanPhysiNode, SNodeList* pScanCols) {
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == nodeType(pScanPhysiNode)
|| QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN == nodeType(pScanPhysiNode)) {
pScanPhysiNode->pScanCols = nodesMakeList();
CHECK_ALLOC(pScanPhysiNode->pScanCols, TSDB_CODE_OUT_OF_MEMORY);
CHECK_CODE_EXT(nodesListStrictAppend(pScanPhysiNode->pScanCols, createPrimaryKeyCol(pCxt, pScanPhysiNode->uid)));
@ -213,6 +215,11 @@ static int32_t createScanCols(SPhysiPlanContext* pCxt, SScanPhysiNode* pScanPhys
}
CHECK_CODE_EXT(nodesListStrictAppend(pScanPhysiNode->pScanCols, nodesCloneNode(pNode)));
}
} else {
pScanPhysiNode->pScanCols = nodesCloneList(pScanCols);
CHECK_ALLOC(pScanPhysiNode->pScanCols, TSDB_CODE_OUT_OF_MEMORY);
}
return TSDB_CODE_SUCCESS;
}
@ -256,9 +263,30 @@ static SPhysiNode* createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* p
pTableScan->scanRange = pScanLogicNode->scanRange;
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
pSubplan->execNodeStat.tableNum = pScanLogicNode->pVgroupList->vgroups[0].numOfTable;
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
return (SPhysiNode*)pTableScan;
}
static SPhysiNode* createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode) {
SSystemTableScanPhysiNode* pScan = (SSystemTableScanPhysiNode*)makePhysiNode(pCxt, QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN);
CHECK_ALLOC(pScan, NULL);
CHECK_CODE(initScanPhysiNode(pCxt, pScanLogicNode, (SScanPhysiNode*)pScan), (SPhysiNode*)pScan);
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES)) {
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
} else {
for (int32_t i = 0; i < pScanLogicNode->pVgroupList->numOfVgroups; ++i) {
SQueryNodeAddr addr;
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups + i, &addr);
taosArrayPush(pCxt->pExecNodeList, &addr);
}
}
pScan->mgmtEpSet = pCxt->pPlanCxt->mgmtEpSet;
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
return (SPhysiNode*)pScan;
}
static SPhysiNode* createStreamScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode) {
SStreamScanPhysiNode* pTableScan = (SStreamScanPhysiNode*)makePhysiNode(pCxt, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
CHECK_ALLOC(pTableScan, NULL);
@ -272,7 +300,8 @@ static SPhysiNode* createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpl
return createTagScanPhysiNode(pCxt, pScanLogicNode);
case SCAN_TYPE_TABLE:
return createTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode);
case SCAN_TYPE_TOPIC:
case SCAN_TYPE_SYSTEM_TABLE:
return createSystemTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode);
case SCAN_TYPE_STREAM:
return createStreamScanPhysiNode(pCxt, pSubplan, pScanLogicNode);
default:
@ -836,7 +865,7 @@ static SQueryPlan* makeQueryPhysiPlan(SPhysiPlanContext* pCxt) {
static int32_t doBuildPhysiPlan(SPhysiPlanContext* pCxt, SSubLogicPlan* pLogicSubplan, SSubplan* pParent, SQueryPlan* pQueryPlan) {
SSubplan* pSubplan = createPhysiSubplan(pCxt, pLogicSubplan);
CHECK_ALLOC(pSubplan, DEAL_RES_ERROR);
CHECK_ALLOC(pSubplan, TSDB_CODE_OUT_OF_MEMORY);
CHECK_CODE_EXT(pushSubplan(pCxt, pSubplan, pLogicSubplan->level, pQueryPlan->pSubplans));
++(pQueryPlan->numOfSubplans);
if (NULL != pParent) {

View File

@ -44,8 +44,8 @@ typedef struct SStsInfo {
} SStsInfo;
static SLogicNode* stsMatchByNode(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType &&
SCAN_TYPE_TOPIC != ((SScanLogicNode*)pNode)->scanType) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) &&
NULL != ((SScanLogicNode*)pNode)->pVgroupList && ((SScanLogicNode*)pNode)->pVgroupList->numOfVgroups > 1) {
return pNode;
}
SNode* pChild;

View File

@ -185,6 +185,12 @@ TEST_F(PlannerTest, interval) {
ASSERT_TRUE(run());
}
TEST_F(PlannerTest, showTables) {
setDatabase("root", "test");
bind("show tables");
}
TEST_F(PlannerTest, createTopic) {
setDatabase("root", "test");

View File

@ -165,3 +165,43 @@ int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransp
rpcSendRequest(pTransporter, epSet, &rpcMsg, pTransporterId);
return TSDB_CODE_SUCCESS;
}
char *jobTaskStatusStr(int32_t status) {
switch (status) {
case JOB_TASK_STATUS_NULL:
return "NULL";
case JOB_TASK_STATUS_NOT_START:
return "NOT_START";
case JOB_TASK_STATUS_EXECUTING:
return "EXECUTING";
case JOB_TASK_STATUS_PARTIAL_SUCCEED:
return "PARTIAL_SUCCEED";
case JOB_TASK_STATUS_SUCCEED:
return "SUCCEED";
case JOB_TASK_STATUS_FAILED:
return "FAILED";
case JOB_TASK_STATUS_CANCELLING:
return "CANCELLING";
case JOB_TASK_STATUS_CANCELLED:
return "CANCELLED";
case JOB_TASK_STATUS_DROPPING:
return "DROPPING";
default:
break;
}
return "UNKNOWN";
}
SSchema createSchema(uint8_t type, int32_t bytes, int32_t colId, const char* name) {
SSchema s = {0};
s.type = type;
s.bytes = bytes;
s.colId = colId;
tstrncpy(s.name, name, tListLen(s.name));
return s;
}

View File

@ -27,6 +27,7 @@ int32_t (*queryProcessMsgRsp[TDMT_MAX])(void *output, char *msg, int32_t msgSize
int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
memcpy(pOut->db, usedbRsp->db, TSDB_DB_FNAME_LEN);
pOut->dbId = usedbRsp->uid;
pOut->dbVgroup = calloc(1, sizeof(SDBVgInfo));
if (NULL == pOut->dbVgroup) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -34,6 +35,11 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
pOut->dbVgroup->vgVersion = usedbRsp->vgVersion;
pOut->dbVgroup->hashMethod = usedbRsp->hashMethod;
if (usedbRsp->vgNum <= 0) {
return TSDB_CODE_SUCCESS;
}
pOut->dbVgroup->vgHash =
taosHashInit(usedbRsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (NULL == pOut->dbVgroup->vgHash) {
@ -166,7 +172,7 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
}
if (pMetaMsg->tableType != TSDB_SUPER_TABLE && pMetaMsg->tableType != TSDB_CHILD_TABLE &&
pMetaMsg->tableType != TSDB_NORMAL_TABLE) {
pMetaMsg->tableType != TSDB_NORMAL_TABLE && pMetaMsg->tableType != TSDB_SYSTEM_TABLE) {
qError("invalid tableType[%d] in table meta rsp msg", pMetaMsg->tableType);
return TSDB_CODE_TSC_INVALID_VALUE;
}

View File

@ -60,23 +60,15 @@ enum {
QW_WRITE,
};
enum {
QW_EXIST_ACQUIRE = 1,
QW_EXIST_RET_ERR,
};
enum {
QW_NOT_EXIST_RET_ERR = 1,
QW_NOT_EXIST_ADD,
};
enum {
QW_ADD_RET_ERR = 1,
QW_ADD_ACQUIRE,
};
typedef struct SQWDebug {
int32_t lockDebug;
bool lockEnable;
bool statusEnable;
} SQWDebug;
typedef struct SQWMsg {
@ -92,14 +84,10 @@ typedef struct SQWHbInfo {
} SQWHbInfo;
typedef struct SQWPhaseInput {
int8_t taskStatus;
int8_t taskType;
int32_t code;
} SQWPhaseInput;
typedef struct SQWPhaseOutput {
int32_t rspCode;
bool needStop;
} SQWPhaseOutput;
@ -119,9 +107,10 @@ typedef struct SQWTaskCtx {
void *cancelConnection;
bool emptyRes;
bool multiExec;
int8_t queryContinue;
int8_t queryInQueue;
bool queryFetched;
bool queryEnd;
bool queryContinue;
bool queryInQueue;
int32_t rspCode;
int8_t events[QW_EVENT_MAX];
@ -198,7 +187,7 @@ typedef struct SQWorkerMgmt {
#define QW_SCH_TASK_WLOG(param, ...) qWarn("QW:%p SID:0x%"PRIx64",QID:0x%"PRIx64",TID:0x%"PRIx64" " param, mgmt, sId, qId, tId, __VA_ARGS__)
#define QW_SCH_TASK_DLOG(param, ...) qDebug("QW:%p SID:0x%"PRIx64",QID:0x%"PRIx64",TID:0x%"PRIx64" " param, mgmt, sId, qId, tId, __VA_ARGS__)
#define QW_LOCK_DEBUG(...) do { if (gQWDebug.lockDebug) { qDebug(__VA_ARGS__); } } while (0)
#define QW_LOCK_DEBUG(...) do { if (gQWDebug.lockEnable) { qDebug(__VA_ARGS__); } } while (0)
#define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000

File diff suppressed because it is too large Load Diff

View File

@ -46,15 +46,18 @@ void qwFreeFetchRsp(void *msg) {
int32_t qwBuildAndSendQueryRsp(void *connection, int32_t code) {
SRpcMsg *pMsg = (SRpcMsg *)connection;
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code;
SQueryTableRsp rsp = {.code = code};
int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp);
void *msg = rpcMallocCont(contLen);
tSerializeSQueryTableRsp(msg, contLen, &rsp);
SRpcMsg rpcRsp = {
.msgType = TDMT_VND_QUERY_RSP,
.handle = pMsg->handle,
.ahandle = pMsg->ahandle,
.pCont = pRsp,
.contLen = sizeof(*pRsp),
.pCont = msg,
.contLen = contLen,
.code = code,
};
@ -260,7 +263,7 @@ int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, void *connection) {
QW_ERR_RET(code);
}
QW_SCH_TASK_DLOG("put task continue exec msg to query queue, vgId:%d", mgmt->nodeId);
QW_SCH_TASK_DLOG("query continue msg put to queue, vgId:%d", mgmt->nodeId);
return TSDB_CODE_SUCCESS;
}
@ -297,7 +300,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
QW_SCH_TASK_DLOG("processQuery start, node:%p, sql:%s", node, sql);
tfree(sql);
QW_RET(qwProcessQuery(QW_FPARAMS(), &qwMsg, msg->taskType));
QW_ERR_RET(qwProcessQuery(QW_FPARAMS(), &qwMsg, msg->taskType));
QW_SCH_TASK_DLOG("processQuery end, node:%p", node);

View File

@ -47,6 +47,8 @@ namespace {
#define qwtTestQueryQueueSize 1000000
#define qwtTestFetchQueueSize 1000000
bool qwtEnableLog = true;
int32_t qwtTestMaxExecTaskUsec = 2;
int32_t qwtTestReqMaxDelayUsec = 2;
@ -54,10 +56,10 @@ int64_t qwtTestQueryId = 0;
bool qwtTestEnableSleep = true;
bool qwtTestStop = false;
bool qwtTestDeadLoop = false;
int32_t qwtTestMTRunSec = 60;
int32_t qwtTestPrintNum = 100000;
int32_t qwtTestCaseIdx = 0;
int32_t qwtTestCaseNum = 4;
int32_t qwtTestMTRunSec = 2;
int32_t qwtTestPrintNum = 10000;
uint64_t qwtTestCaseIdx = 0;
uint64_t qwtTestCaseNum = 4;
bool qwtTestCaseFinished = false;
tsem_t qwtTestQuerySem;
tsem_t qwtTestFetchSem;
@ -95,11 +97,15 @@ SSchTasksStatusReq qwtstatusMsg = {0};
void qwtInitLogFile() {
if (!qwtEnableLog) {
return;
}
const char *defaultLogFileNamePrefix = "taosdlog";
const int32_t maxLogFileNum = 10;
tsAsyncLog = 0;
qDebugFlag = 159;
strcpy(tsLogDir, "/var/log/taos");
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@ -202,6 +208,9 @@ int32_t qwtPutReqToQueue(void *node, struct SRpcMsg *pMsg) {
return 0;
}
void qwtSendReqToDnode(void* pVnode, struct SEpSet* epSet, struct SRpcMsg* pReq) {
}
void qwtRpcSendResponse(const SRpcMsg *pRsp) {
@ -240,6 +249,7 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
if (0 == pRsp->code && 0 == rsp->completed) {
qwtBuildFetchReqMsg(&qwtfetchMsg, &qwtfetchRpc);
qwtPutReqToFetchQueue((void *)0x1, &qwtfetchRpc);
rpcFreeCont(rsp);
return;
}
@ -262,26 +272,15 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
return;
}
int32_t qwtCreateExecTask(void* tsdb, int32_t vgId, struct SSubplan* pPlan, qTaskInfo_t* pTaskInfo, DataSinkHandle* handle) {
int32_t idx = abs((++qwtTestCaseIdx) % qwtTestCaseNum);
int32_t qwtCreateExecTask(void* tsdb, int32_t vgId, uint64_t taskId, struct SSubplan* pPlan, qTaskInfo_t* pTaskInfo, DataSinkHandle* handle) {
qwtTestSinkBlockNum = 0;
qwtTestSinkMaxBlockNum = taosRand() % 100 + 1;
qwtTestSinkQueryEnd = false;
if (0 == idx) {
*pTaskInfo = (qTaskInfo_t)qwtTestCaseIdx;
*handle = (DataSinkHandle)qwtTestCaseIdx+1;
} else if (1 == idx) {
*pTaskInfo = NULL;
*handle = NULL;
} else if (2 == idx) {
*pTaskInfo = (qTaskInfo_t)qwtTestCaseIdx;
*handle = NULL;
} else if (3 == idx) {
*pTaskInfo = NULL;
*handle = (DataSinkHandle)qwtTestCaseIdx;
}
*pTaskInfo = (qTaskInfo_t)qwtTestCaseIdx+1;
*handle = (DataSinkHandle)qwtTestCaseIdx+2;
++qwtTestCaseIdx;
return 0;
}
@ -314,7 +313,7 @@ int32_t qwtExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t *useconds) {
if (endExec) {
*pRes = (SSDataBlock*)calloc(1, sizeof(SSDataBlock));
(*pRes)->info.rows = taosRand() % 1000;
(*pRes)->info.rows = taosRand() % 1000 + 1;
} else {
*pRes = NULL;
*useconds = taosRand() % 10;
@ -849,7 +848,6 @@ void *fetchQueueThread(void *param) {
}
#if 0
TEST(seqTest, normalCase) {
void *mgmt = NULL;
@ -880,7 +878,10 @@ TEST(seqTest, normalCase) {
stubSetPutDataBlock();
stubSetGetDataBlock();
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, mockPointer, qwtPutReqToQueue);
SMsgCb msgCb = {0};
msgCb.pWrapper = (struct SMgmtWrapper *)mockPointer;
msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue;
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
@ -919,7 +920,10 @@ TEST(seqTest, cancelFirst) {
stubSetStringToPlan();
stubSetRpcSendResponse();
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, mockPointer, qwtPutReqToQueue);
SMsgCb msgCb = {0};
msgCb.pWrapper = (struct SMgmtWrapper *)mockPointer;
msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue;
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
@ -965,7 +969,10 @@ TEST(seqTest, randCase) {
taosSeedRand(taosGetTimestampSec());
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, mockPointer, qwtPutReqToQueue);
SMsgCb msgCb = {0};
msgCb.pWrapper = (struct SMgmtWrapper *)mockPointer;
msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue;
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
int32_t t = 0;
@ -1024,21 +1031,34 @@ TEST(seqTest, multithreadRand) {
stubSetStringToPlan();
stubSetRpcSendResponse();
stubSetExecTask();
stubSetCreateExecTask();
stubSetAsyncKillTask();
stubSetDestroyTask();
stubSetDestroyDataSinker();
stubSetGetDataLength();
stubSetEndPut();
stubSetPutDataBlock();
stubSetGetDataBlock();
taosSeedRand(taosGetTimestampSec());
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, mockPointer, qwtPutReqToQueue);
SMsgCb msgCb = {0};
msgCb.pWrapper = (struct SMgmtWrapper *)mockPointer;
msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue;
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
TdThreadAttr thattr;
taosThreadAttrInit(&thattr);
TdThread t1,t2,t3,t4,t5;
TdThread t1,t2,t3,t4,t5,t6;
taosThreadCreate(&(t1), &thattr, queryThread, mgmt);
taosThreadCreate(&(t2), &thattr, readyThread, NULL);
taosThreadCreate(&(t3), &thattr, fetchThread, NULL);
taosThreadCreate(&(t4), &thattr, dropThread, NULL);
taosThreadCreate(&(t5), &thattr, statusThread, NULL);
taosThreadCreate(&(t6), &thattr, fetchQueueThread, mgmt);
while (true) {
if (qwtTestDeadLoop) {
@ -1052,11 +1072,18 @@ TEST(seqTest, multithreadRand) {
qwtTestStop = true;
taosSsleep(3);
qwtTestQueryQueueNum = 0;
qwtTestQueryQueueRIdx = 0;
qwtTestQueryQueueWIdx = 0;
qwtTestQueryQueueLock = 0;
qwtTestFetchQueueNum = 0;
qwtTestFetchQueueRIdx = 0;
qwtTestFetchQueueWIdx = 0;
qwtTestFetchQueueLock = 0;
qWorkerDestroy(&mgmt);
}
#endif
TEST(rcTest, shortExecshortDelay) {
void *mgmt = NULL;
int32_t code = 0;
@ -1313,7 +1340,6 @@ TEST(rcTest, shortExeclongDelay) {
}
#if 0
TEST(rcTest, dropTest) {
void *mgmt = NULL;
int32_t code = 0;
@ -1335,7 +1361,10 @@ TEST(rcTest, dropTest) {
taosSeedRand(taosGetTimestampSec());
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, mockPointer, qwtPutReqToQueue);
SMsgCb msgCb = {0};
msgCb.pWrapper = (struct SMgmtWrapper *)mockPointer;
msgCb.queueFps[QUERY_QUEUE] = (PutToQueueFp)qwtPutReqToQueue;
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
tsem_init(&qwtTestQuerySem, 0, 0);
@ -1345,7 +1374,7 @@ TEST(rcTest, dropTest) {
taosThreadAttrInit(&thattr);
TdThread t1,t2,t3,t4,t5;
taosThreadCreate(&(t1), &thattr, clientThread, mgmt);
taosThreadCreate(&(t1), &thattr, qwtclientThread, mgmt);
taosThreadCreate(&(t2), &thattr, queryQueueThread, mgmt);
taosThreadCreate(&(t3), &thattr, fetchQueueThread, mgmt);
@ -1363,7 +1392,6 @@ TEST(rcTest, dropTest) {
qWorkerDestroy(&mgmt);
}
#endif
int main(int argc, char** argv) {

View File

@ -28,8 +28,7 @@ extern "C" {
#define SCHEDULE_DEFAULT_MAX_JOB_NUM 1000
#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000
#define SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM 20 // unit is TSDB_TABLE_NUM_UNIT
#define SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM 200 // unit is TSDB_TABLE_NUM_UNIT
#define SCH_MAX_CANDIDATE_EP_NUM TSDB_MAX_REPLICA
@ -113,6 +112,7 @@ typedef struct SSchTask {
int32_t msgLen; // msg length
int8_t status; // task status
int32_t lastMsgType; // last sent msg type
int32_t tryTimes; // task already tried times
SQueryNodeAddr succeedAddr; // task executed success node address
int8_t candidateIdx; // current try condidation index
SArray *candidateAddrs; // condidate node addresses, element is SQueryNodeAddr
@ -136,6 +136,7 @@ typedef struct SSchJob {
uint64_t queryId;
SSchJobAttr attr;
int32_t levelNum;
int32_t taskNum;
void *transport;
SArray *nodeList; // qnode/vnode list, element is SQueryNodeAddr
SArray *levels; // Element is SQueryLevel, starting from 0. SArray<SSchLevel>
@ -154,7 +155,8 @@ typedef struct SSchJob {
int32_t remoteFetch;
SSchTask *fetchTask;
int32_t errCode;
void *res; //TODO free it or not
SArray *errList; // SArray<SQueryErrorInfo>
void *resData; //TODO free it or not
int32_t resNumOfRows;
const char *sql;
SQueryProfileSummary summary;
@ -168,27 +170,33 @@ extern SSchedulerMgmt schMgmt;
#define SCH_SET_TASK_LASTMSG_TYPE(_task, _type) do { if(_task) { atomic_store_32(&(_task)->lastMsgType, _type); } } while (0)
#define SCH_GET_TASK_LASTMSG_TYPE(_task) ((_task) ? atomic_load_32(&(_task)->lastMsgType) : -1)
#define SCH_IS_DATA_SRC_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN)
#define SCH_TASK_NEED_WAIT_ALL(task) ((task)->plan->subplanType == SUBPLAN_TYPE_MODIFY)
#define SCH_TASK_NO_NEED_DROP(task) ((task)->plan->subplanType == SUBPLAN_TYPE_MODIFY)
#define SCH_IS_DATA_SRC_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN)
#define SCH_IS_DATA_SRC_TASK(task) (((task)->plan->subplanType == SUBPLAN_TYPE_SCAN) || ((task)->plan->subplanType == SUBPLAN_TYPE_MODIFY))
#define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum)
#define SCH_SET_TASK_STATUS(task, st) atomic_store_8(&(task)->status, st)
#define SCH_GET_TASK_STATUS(task) atomic_load_8(&(task)->status)
#define SCH_GET_TASK_STATUS_STR(task) jobTaskStatusStr(SCH_GET_TASK_STATUS(task))
#define SCH_SET_JOB_STATUS(job, st) atomic_store_8(&(job)->status, st)
#define SCH_GET_JOB_STATUS(job) atomic_load_8(&(job)->status)
#define SCH_GET_JOB_STATUS_STR(job) jobTaskStatusStr(SCH_GET_JOB_STATUS(job))
#define SCH_SET_JOB_NEED_FLOW_CTRL(_job) (_job)->attr.needFlowCtrl = true
#define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl)
#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEAF_TASK(_job, _task) && SCH_IS_LEVEL_UNFINISHED((_task)->level))
#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEAF_TASK(_job, _task) && SCH_IS_LEVEL_UNFINISHED((_task)->level))
#define SCH_SET_JOB_TYPE(_job, type) (_job)->attr.queryJob = ((type) != SUBPLAN_TYPE_MODIFY)
#define SCH_IS_QUERY_JOB(_job) ((_job)->attr.queryJob)
#define SCH_JOB_NEED_FETCH(_job) SCH_IS_QUERY_JOB(_job)
#define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum)
#define SCH_IS_WAIT_ALL_JOB(_job) (!SCH_IS_QUERY_JOB(_job))
#define SCH_IS_NEED_DROP_JOB(_job) (SCH_IS_QUERY_JOB(_job))
#define SCH_IS_LEVEL_UNFINISHED(_level) ((_level)->taskLaunchedNum < (_level)->taskNum)
#define SCH_GET_CUR_EP(_addr) (&(_addr)->epSet.eps[(_addr)->epSet.inUse])
#define SCH_SWITCH_EPSET(_addr) ((_addr)->epSet.inUse = ((_addr)->epSet.inUse + 1) % (_addr)->epSet.numOfEps)
#define SCH_TASK_NUM_OF_EPS(_addr) ((_addr)->epSet.numOfEps)
#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)

View File

@ -98,6 +98,7 @@ static FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) {
int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) {
int32_t lastMsgType = SCH_GET_TASK_LASTMSG_TYPE(pTask);
int32_t taskStatus = SCH_GET_TASK_STATUS(pTask);
switch (msgType) {
case TDMT_VND_CREATE_TABLE_RSP:
@ -112,17 +113,14 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING &&
SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%d, rspType:%s", SCH_GET_TASK_STATUS(pTask),
TMSG_INFO(msgType));
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
break;
default:
SCH_TASK_ELOG("unknown rsp msg, type:%s, status:%d", TMSG_INFO(msgType), SCH_GET_TASK_STATUS(pTask));
SCH_TASK_ELOG("unknown rsp msg, type:%s, status:%s", TMSG_INFO(msgType), jobTaskStatusStr(taskStatus));
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
@ -185,7 +183,7 @@ int32_t schCheckAndUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
break;
default:
SCH_JOB_ELOG("invalid job status:%d", oriStatus);
SCH_JOB_ELOG("invalid job status:%s", jobTaskStatusStr(oriStatus));
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
@ -193,7 +191,7 @@ int32_t schCheckAndUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
continue;
}
SCH_JOB_DLOG("job status updated from %d to %d", oriStatus, newStatus);
SCH_JOB_DLOG("job status updated from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
break;
}
@ -202,8 +200,7 @@ int32_t schCheckAndUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
_return:
SCH_JOB_ELOG("invalid job status update, from %d to %d", oriStatus, newStatus);
SCH_JOB_ELOG("invalid job status update, from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
SCH_ERR_RET(code);
}
@ -402,6 +399,8 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
SCH_TASK_ELOG("taosHashPut to planToTaks failed, taskIdx:%d", n);
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
++pJob->taskNum;
}
SCH_JOB_DLOG("level initialized, taskNum:%d", taskNum);
@ -459,7 +458,7 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
if (addNum <= 0) {
SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum);
return TSDB_CODE_QRY_INVALID_INPUT;
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
/*
@ -493,7 +492,7 @@ int32_t schPushTaskToExecList(SSchJob *pJob, SSchTask *pTask) {
int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
} else {
SCH_TASK_DLOG("task removed from execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks));
}
@ -502,8 +501,7 @@ int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in succTask list, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_ELOG("task already in succTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -522,7 +520,7 @@ int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
*moved = false;
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->failTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
@ -530,7 +528,7 @@ int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_WLOG("task already in failTask list, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_WLOG("task already in failTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -547,7 +545,7 @@ int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
@ -555,7 +553,7 @@ int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in execTask list, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_ELOG("task already in execTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -571,23 +569,48 @@ int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
}
int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bool *needRetry) {
// TODO set retry or not based on task type/errCode/retry times/job status/available eps...
int8_t status = 0;
++pTask->tryTimes;
if (schJobNeedToStop(pJob, &status)) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry cause of job status, job status:%s", jobTaskStatusStr(status));
return TSDB_CODE_SUCCESS;
}
if (pTask->tryTimes >= REQUEST_MAX_TRY_TIMES) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry since reach max try times, tryTimes:%d", pTask->tryTimes);
return TSDB_CODE_SUCCESS;
}
if (!NEED_SCHEDULER_RETRY_ERROR(errCode)) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry cause of errCode, errCode:%x - %s", errCode, tstrerror(errCode));
return TSDB_CODE_SUCCESS;
}
// TODO CHECK epList/condidateList
if (SCH_IS_DATA_SRC_TASK(pTask)) {
if (pTask->tryTimes >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry since all ep tried, tryTimes:%d, epNum:%d", pTask->tryTimes, SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
return TSDB_CODE_SUCCESS;
}
} else {
int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
if ((pTask->candidateIdx + 1) >= candidateNum) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d", pTask->candidateIdx, candidateNum);
return TSDB_CODE_SUCCESS;
}
++pTask->candidateIdx;
}
*needRetry = true;
SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->tryTimes, errCode, tstrerror(errCode));
return TSDB_CODE_SUCCESS;
}
int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) {
@ -660,13 +683,41 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchHbTrans *trans) {
return TSDB_CODE_SUCCESS;
}
void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) {
if (TSDB_CODE_SUCCESS == errCode) {
return;
}
int32_t origCode = atomic_load_32(&pJob->errCode);
if (TSDB_CODE_SUCCESS == origCode) {
if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) {
goto _return;
}
origCode = atomic_load_32(&pJob->errCode);
}
if (NEED_CLIENT_HANDLE_ERROR(origCode)) {
return;
}
if (NEED_CLIENT_HANDLE_ERROR(errCode)) {
atomic_store_32(&pJob->errCode, errCode);
goto _return;
}
return;
_return:
SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
}
int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) {
// if already FAILED, no more processing
SCH_ERR_RET(schCheckAndUpdateJobStatus(pJob, status));
if (errCode) {
atomic_store_32(&pJob->errCode, errCode);
}
schUpdateJobErrCode(pJob, errCode);
if (atomic_load_8(&pJob->userFetch) || pJob->attr.syncSchedule) {
tsem_post(&pJob->rspSem);
@ -720,8 +771,7 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode)
int8_t status = 0;
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_DLOG("task failed not processed cause of job status, job status:%d", status);
SCH_TASK_DLOG("task failed not processed cause of job status, job status:%s", jobTaskStatusStr(status));
SCH_RET(atomic_load_32(&pJob->errCode));
}
@ -740,23 +790,23 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode)
if (SCH_GET_TASK_STATUS(pTask) == JOB_TASK_STATUS_EXECUTING) {
SCH_ERR_JRET(schMoveTaskToFailList(pJob, pTask, &moved));
} else {
SCH_TASK_DLOG("task already done, no more failure process, status:%d", SCH_GET_TASK_STATUS(pTask));
return TSDB_CODE_SUCCESS;
SCH_TASK_ELOG("task not in executing list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_FAILED);
if (SCH_TASK_NEED_WAIT_ALL(pTask)) {
if (SCH_IS_WAIT_ALL_JOB(pJob)) {
SCH_LOCK(SCH_WRITE, &pTask->level->lock);
pTask->level->taskFailed++;
taskDone = pTask->level->taskSucceed + pTask->level->taskFailed;
SCH_UNLOCK(SCH_WRITE, &pTask->level->lock);
atomic_store_32(&pJob->errCode, errCode);
schUpdateJobErrCode(pJob, errCode);
if (taskDone < pTask->level->taskNum) {
SCH_TASK_DLOG("not all tasks done, done:%d, all:%d", taskDone, pTask->level->taskNum);
SCH_ERR_RET(errCode);
SCH_TASK_DLOG("need to wait other tasks, doneNum:%d, allNum:%d", taskDone, pTask->level->taskNum);
SCH_RET(errCode);
}
}
} else {
@ -775,7 +825,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
bool moved = false;
int32_t code = 0;
SCH_TASK_DLOG("taskOnSuccess, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_DLOG("taskOnSuccess, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_JRET(schMoveTaskToSuccList(pJob, pTask, &moved));
@ -788,8 +838,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
int32_t parentNum = pTask->parents ? (int32_t)taosArrayGetSize(pTask->parents) : 0;
if (parentNum == 0) {
int32_t taskDone = 0;
if (SCH_TASK_NEED_WAIT_ALL(pTask)) {
if (SCH_IS_WAIT_ALL_JOB(pJob)) {
SCH_LOCK(SCH_WRITE, &pTask->level->lock);
pTask->level->taskSucceed++;
taskDone = pTask->level->taskSucceed + pTask->level->taskFailed;
@ -860,11 +909,11 @@ int32_t schFetchFromRemote(SSchJob *pJob) {
return TSDB_CODE_SUCCESS;
}
void *res = atomic_load_ptr(&pJob->res);
if (res) {
void *resData = atomic_load_ptr(&pJob->resData);
if (resData) {
atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0);
SCH_JOB_DLOG("res already fetched, res:%p", res);
SCH_JOB_DLOG("res already fetched, res:%p", resData);
return TSDB_CODE_SUCCESS;
}
@ -886,8 +935,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
int8_t status = 0;
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%d", status);
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), rspCode);
SCH_RET(atomic_load_32(&pJob->errCode));
}
@ -895,41 +943,55 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
switch (msgType) {
case TDMT_VND_CREATE_TABLE_RSP: {
SVCreateTbBatchRsp batchRsp = {0};
if (msg) {
tDeserializeSVCreateTbBatchRsp(msg, msgSize, &batchRsp);
if (batchRsp.rspList) {
int32_t num = taosArrayGetSize(batchRsp.rspList);
for (int32_t i = 0; i < num; ++i) {
SVCreateTbRsp *rsp = taosArrayGet(batchRsp.rspList, i);
if (NEED_CLIENT_HANDLE_ERROR(rsp->code)) {
taosArrayDestroy(batchRsp.rspList);
SCH_ERR_JRET(rsp->code);
}
}
taosArrayDestroy(batchRsp.rspList);
}
}
SCH_ERR_JRET(rspCode);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
case TDMT_VND_SUBMIT_RSP: {
#if 0 // TODO OPEN THIS
SShellSubmitRspMsg *rsp = (SShellSubmitRspMsg *)msg;
if (rspCode != TSDB_CODE_SUCCESS || NULL == msg || rsp->code != TSDB_CODE_SUCCESS) {
SCH_ERR_RET(schProcessOnTaskFailure(pJob, pTask, rspCode));
if (msg) {
SSubmitRsp *rsp = (SSubmitRsp *)msg;
SCH_ERR_JRET(rsp->code);
}
pJob->resNumOfRows += rsp->affectedRows;
#else
SCH_ERR_JRET(rspCode);
SSubmitRsp *rsp = (SSubmitRsp *)msg;
if (rsp) {
pJob->resNumOfRows += rsp->affectedRows;
}
#endif
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
}
case TDMT_VND_QUERY_RSP: {
SQueryTableRsp *rsp = (SQueryTableRsp *)msg;
SQueryTableRsp rsp = {0};
if (msg) {
tDeserializeSQueryTableRsp(msg, msgSize, &rsp);
SCH_ERR_JRET(rsp.code);
}
SCH_ERR_JRET(rspCode);
if (NULL == msg) {
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, TDMT_VND_RES_READY));
@ -943,7 +1005,6 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
SCH_ERR_JRET(rsp->code);
SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
break;
@ -956,13 +1017,13 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
if (pJob->res) {
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->res);
if (pJob->resData) {
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->resData);
tfree(rsp);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
atomic_store_ptr(&pJob->res, rsp);
atomic_store_ptr(&pJob->resData, rsp);
atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows));
if (rsp->completed) {
@ -981,7 +1042,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
break;
}
default:
SCH_TASK_ELOG("unknown rsp msg, type:%d, status:%d", msgType, SCH_GET_TASK_STATUS(pTask));
SCH_TASK_ELOG("unknown rsp msg, type:%d, status:%s", msgType, SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
@ -1233,6 +1294,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
memcpy(pMsg->msg, pJob->sql, len);
memcpy(pMsg->msg + len, pTask->msg, pTask->msgLen);
break;
}
@ -1358,11 +1420,17 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_DLOG("no need to launch task cause of job status, job status:%d", status);
SCH_TASK_DLOG("no need to launch task cause of job status, job status:%s", jobTaskStatusStr(status));
SCH_RET(atomic_load_32(&pJob->errCode));
}
// NOTE: race condition: the task should be put into the hash table before send msg to server
if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING) {
SCH_ERR_RET(schPushTaskToExecList(pJob, pTask));
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXECUTING);
}
SSubplan *plan = pTask->plan;
if (NULL == pTask->msg) { // TODO add more detailed reason for failure
@ -1378,12 +1446,6 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));
// NOTE: race condition: the task should be put into the hash table before send msg to server
if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING) {
SCH_ERR_RET(schPushTaskToExecList(pJob, pTask));
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXECUTING);
}
if (SCH_IS_QUERY_JOB(pJob)) {
SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
}
@ -1439,14 +1501,14 @@ int32_t schLaunchJob(SSchJob *pJob) {
void schDropTaskOnExecutedNode(SSchJob *pJob, SSchTask *pTask) {
if (NULL == pTask->execAddrs) {
SCH_TASK_DLOG("no exec address, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_DLOG("no exec address, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
return;
}
int32_t size = (int32_t)taosArrayGetSize(pTask->execAddrs);
if (size <= 0) {
SCH_TASK_DLOG("task has no exec address, no need to drop it, status:%d", SCH_GET_TASK_STATUS(pTask));
SCH_TASK_DLOG("task has no exec address, no need to drop it, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
return;
}
@ -1461,13 +1523,15 @@ void schDropTaskOnExecutedNode(SSchJob *pJob, SSchTask *pTask) {
}
void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
if (!SCH_IS_NEED_DROP_JOB(pJob)) {
return;
}
void *pIter = taosHashIterate(list, NULL);
while (pIter) {
SSchTask *pTask = *(SSchTask **)pIter;
if (!SCH_TASK_NO_NEED_DROP(pTask)) {
schDropTaskOnExecutedNode(pJob, pTask);
}
pIter = taosHashIterate(list, pIter);
}
@ -1524,8 +1588,7 @@ void schFreeJobImpl(void *job) {
taosArrayDestroy(pJob->levels);
taosArrayDestroy(pJob->nodeList);
tfree(pJob->res);
tfree(pJob->resData);
tfree(pJob);
qDebug("QID:0x%" PRIx64 " job freed, refId:%" PRIx64 ", pointer:%p", queryId, refId, pJob);
@ -1595,11 +1658,11 @@ static int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pD
*job = pJob->refId;
if (syncSchedule) {
SCH_JOB_DLOG("will wait for rsp now, job status:%d", SCH_GET_JOB_STATUS(pJob));
SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
tsem_wait(&pJob->rspSem);
}
SCH_JOB_DLOG("job exec done, job status:%d", SCH_GET_JOB_STATUS(pJob));
SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
schReleaseJob(pJob->refId);
@ -1662,8 +1725,10 @@ int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, in
SCH_ERR_RET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, true));
SSchJob *job = schAcquireJob(*pJob);
pRes->code = atomic_load_32(&job->errCode);
pRes->numOfRows = job->resNumOfRows;
schReleaseJob(*pJob);
return TSDB_CODE_SUCCESS;
@ -1816,13 +1881,13 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
int8_t status = SCH_GET_JOB_STATUS(pJob);
if (status == JOB_TASK_STATUS_DROPPING) {
SCH_JOB_ELOG("job is dropping, status:%d", status);
SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
schReleaseJob(job);
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
if (!SCH_JOB_NEED_FETCH(pJob)) {
SCH_JOB_ELOG("no need to fetch data, status:%d", SCH_GET_JOB_STATUS(pJob));
SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
schReleaseJob(job);
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
@ -1834,10 +1899,10 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
}
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
SCH_JOB_ELOG("job failed or dropping, status:%d", status);
SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
} else if (status == JOB_TASK_STATUS_SUCCEED) {
SCH_JOB_DLOG("job already succeed, status:%d", status);
SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
goto _return;
} else if (status == JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_ERR_JRET(schFetchFromRemote(pJob));
@ -1848,17 +1913,17 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
status = SCH_GET_JOB_STATUS(pJob);
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
SCH_JOB_ELOG("job failed or dropping, status:%d", status);
SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
}
if (pJob->res && ((SRetrieveTableRsp *)pJob->res)->completed) {
if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
SCH_ERR_JRET(schCheckAndUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
}
while (true) {
*pData = atomic_load_ptr(&pJob->res);
if (*pData != atomic_val_compare_exchange_ptr(&pJob->res, *pData, NULL)) {
*pData = atomic_load_ptr(&pJob->resData);
if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
continue;
}

View File

@ -333,7 +333,10 @@ TEST_F(TransEnv, cliPersistHandle) {
SRpcMsg resp = {0};
void * handle = NULL;
for (int i = 0; i < 10; i++) {
SRpcMsg req = {.handle = resp.handle, .persistHandle = 1};
SRpcMsg req = {0};
req.handle = resp.handle;
req.persistHandle = 1;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -366,7 +369,9 @@ TEST_F(TransEnv, srvReleaseHandle) {
// tr->Restart(processReleaseHandleCb);
void *handle = NULL;
for (int i = 0; i < 1; i++) {
SRpcMsg req = {.handle = resp.handle, .persistHandle = 1};
SRpcMsg req = {0};
req.handle = resp.handle;
req.persistHandle = 1;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -379,7 +384,9 @@ TEST_F(TransEnv, srvReleaseHandle) {
TEST_F(TransEnv, cliReleaseHandleExcept) {
SRpcMsg resp = {0};
for (int i = 0; i < 3; i++) {
SRpcMsg req = {.handle = resp.handle, .persistHandle = 1};
SRpcMsg req = {0};
req.handle = resp.handle;
req.persistHandle = 1;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -411,7 +418,8 @@ TEST_F(TransEnv, srvPersistHandleExcept) {
// tr->SetCliPersistFp(cliPersistHandle);
SRpcMsg resp = {0};
for (int i = 0; i < 5; i++) {
SRpcMsg req = {.handle = resp.handle};
SRpcMsg req = {0};
req.handle = resp.handle;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -429,7 +437,8 @@ TEST_F(TransEnv, cliPersistHandleExcept) {
tr->SetSrvContinueSend(processContinueSend);
SRpcMsg resp = {0};
for (int i = 0; i < 5; i++) {
SRpcMsg req = {.handle = resp.handle};
SRpcMsg req = {0};
req.handle = resp.handle;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -451,7 +460,9 @@ TEST_F(TransEnv, queryExcept) {
tr->SetSrvContinueSend(processRegisterFailure);
SRpcMsg resp = {0};
for (int i = 0; i < 5; i++) {
SRpcMsg req = {.handle = resp.handle, .persistHandle = 1};
SRpcMsg req = {0};
req.handle = resp.handle;
req.persistHandle = 1;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;
@ -467,7 +478,8 @@ TEST_F(TransEnv, queryExcept) {
TEST_F(TransEnv, noResp) {
SRpcMsg resp = {0};
for (int i = 0; i < 5; i++) {
SRpcMsg req = {.noResp = 1};
SRpcMsg req = {0};
req.noResp = 1;
req.msgType = 1;
req.pCont = rpcMallocCont(10);
req.contLen = 10;

View File

@ -305,6 +305,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operat
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TSDB_STATE, "Invalid tsdb state")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TB_NOT_EXIST, "Table not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_HASH_MISMATCH, "Hash value mismatch")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
@ -330,6 +331,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_AVAIL_DISK, "No available disk")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_MESSED_MSG, "TSDB messed message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVLD_TAG_VAL, "TSDB invalid tag value")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last row data")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECREATED, "Table re-created")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_SMA_INDEX_IN_META, "No sma index in meta")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
@ -415,10 +417,12 @@ TAOS_DEFINE_ERROR(TSDB_CODE_CTG_MEM_ERROR, "catalog memory error"
TAOS_DEFINE_ERROR(TSDB_CODE_CTG_SYS_ERROR, "catalog system error")
TAOS_DEFINE_ERROR(TSDB_CODE_CTG_DB_DROPPED, "Database is dropped")
TAOS_DEFINE_ERROR(TSDB_CODE_CTG_OUT_OF_SERVICE, "catalog is out of service")
TAOS_DEFINE_ERROR(TSDB_CODE_CTG_VG_META_MISMATCH, "table meta and vgroup mismatch")
//scheduler
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_STATUS_ERROR, "scheduler status error")
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal error")
TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
#ifdef TAOS_ERROR_C
};

View File

@ -305,7 +305,7 @@ int32_t taosHashGetSize(const SHashObj *pHashObj) {
return (int32_t)atomic_load_64((int64_t*)&pHashObj->size);
}
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) {
int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t size) {
if (pHashObj == NULL || key == NULL || keyLen == 0) {
return -1;
}

View File

@ -6,7 +6,7 @@ sql connect
print =============== create database
sql create database d1 vgroups 2
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -39,17 +39,18 @@ endi
print =============== drop database
sql drop database d1
sql show databases
if $rows != 0 then
return -1
endi
# todo release
#sql show databases
#if $rows != 1 then
# return -1
#endi
print =============== more databases
sql create database d2 vgroups 2
sql create database d3 vgroups 3
sql create database d4 vgroups 4
sql show databases
if $rows != 3 then
if $rows != 4 then
return -1
endi
@ -111,7 +112,7 @@ print =============== drop database
sql drop database d2
sql drop database d3
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -154,7 +155,7 @@ system sh/exec.sh -n dnode1 -s start
print =============== show databases
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi

View File

@ -20,7 +20,7 @@ sql show databases
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $rows != 1 then
if $rows != 2 then
return -1
endi
if $data00 != $db then
@ -52,16 +52,17 @@ print =============== step2
sql_error create database $db
sql create database if not exists $db
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
print =============== step3
sql drop database $db
sql show databases
if $rows != 0 then
return -1
endi
# todo release
#sql show databases
#if $rows != 1 then
# return -1
#endi
print =============== step4
sql_error drop database $db

View File

@ -16,16 +16,22 @@ create1:
return -1
endi
# todo remove
sql create database useless_db
sql show dnodes
if $data4_2 != ready then
goto create1
endi
# todo remove
sql drop database useless_db
print ========== stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGKILL
print =============== create database
sql_error create database d1 vgroups 4
#print =============== create database
#sql_error create database d1 vgroups 4
print ========== start dnode2
system sh/exec.sh -n dnode2 -s start
@ -42,7 +48,7 @@ re-create1:
sql create database d1 vgroups 2 -x re-create1
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -66,6 +72,7 @@ sql_error drop database d1
print ========== start dnode2
system sh/exec.sh -n dnode2 -s start
sleep 1000
print =============== re-create database
$x = 0
@ -79,7 +86,7 @@ re-create2:
sql create database d1 vgroups 5 -x re-create2
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi

View File

@ -5,6 +5,9 @@ system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect
# todo remove
sql create database useless_db
print =============== show dnodes
sql show dnodes;
if $rows != 1 then
@ -15,9 +18,10 @@ if $data00 != 1 then
return -1
endi
if $data02 != 0 then
return -1
endi
# check 'vnodes' feild ?
#if $data02 != 0 then
# return -1
#endi
sql show mnodes;
if $rows != 1 then
@ -49,9 +53,10 @@ if $data10 != 2 then
return -1
endi
if $data02 != 0 then
return -1
endi
# check 'vnodes' feild ?
#if $data02 != 0 then
# return -1
#endi
if $data12 != 0 then
return -1
@ -78,12 +83,15 @@ if $data02 != master then
return -1
endi
# todo remove
sql drop database useless_db
print =============== create database
sql create database d1 vgroups 4;
sql create database d2;
sql show databases
if $rows != 2 then
if $rows != 3 then
return -1
endi

View File

@ -7,7 +7,7 @@ sql connect
print =============== create database
sql create database d0
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -51,6 +51,7 @@ sql insert into ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
#===================================================================
print =============== query data from child table
sql select * from ct1
if $rows != 7 then
return -1
endi

View File

@ -6,7 +6,7 @@ sql connect
print =============== create database
sql create database d1
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -186,9 +186,8 @@ if $rows != 21 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
#system sh/exec.sh -n dnode1 -s start
sleep 2000
print =============== query data

View File

@ -6,6 +6,9 @@ system sh/exec.sh -n dnode1 -s start
sleep 500
sql connect
# todo remove
sql create database useless_db
$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
@ -23,6 +26,9 @@ if $data04 != ready then
goto check_dnode_ready
endi
# todo remove
sql drop database useless_db
#root@trd02 /data2/dnode $ tmq_demo --help
#Used to tmq_demo
# -c Configuration directory, default is
@ -47,7 +53,7 @@ print result-> $system_content
sql show databases
print ===> $rows $data00 $data01 $data02 $data03
if $rows != 1 then
if $rows != 2 then
return -1
endi
if $data00 != tmqdb then

View File

@ -3,6 +3,9 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
# todo remove
sql create database useless_db
print =============== show users
sql show users
if $rows != 1 then
@ -71,4 +74,7 @@ print $data10 $data11 $data22
print $data20 $data11 $data22
print $data30 $data31 $data32
# todo remove
sql drop database useless_db
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -306,7 +306,7 @@ int32_t init_env() {
}
//const char* sql = "select * from tu1";
sprintf(sqlStr, "create topic test_stb_topic_1 as select * from %s", g_stConfInfo.stbName);
sprintf(sqlStr, "create topic test_stb_topic_1 as select * from %s0", g_stConfInfo.stbName);
/*pRes = tmq_create_topic(pConn, "test_stb_topic_1", sqlStr, strlen(sqlStr));*/
pRes = taos_query(pConn, sqlStr);
if (taos_errno(pRes) != 0) {