Merge pull request #1178 from taosdata/feature/liaohj

Feature/liaohj
slguan committed on 2020-02-03 21:15:24 +08:00 (via GitHub)
commit dcdb04fbaa
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in the database)

59 changed files with 9204 additions and 6564 deletions

View File

@ -211,6 +211,12 @@
# whether to enable HTTP compression transmission
# httpEnableCompress 0
# the delayed time for launching each continuous query. 10% of the whole computing time window by default.
# streamCompDelayRatio 0.1
# the max allowed delayed time for launching continuous query. 20ms by default
# tsMaxStreamComputDelay 20000
# whether the telegraf table name contains the number of tags and the number of fields
# telegrafUseFieldNum 0
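
Taken together, the three new entries are ordinary key/value options in taos.cfg. A minimal sketch of enabling them explicitly (the values simply restate the defaults documented in the comments above and are illustrative, not tuning advice):

streamCompDelayRatio     0.1
tsMaxStreamComputDelay   20000
telegrafUseFieldNum      0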

View File

@ -27,7 +27,7 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql);
void tscGetQualifiedTSList(SSqlObj* pSql, SJoinSubquerySupporter* p1, SJoinSubquerySupporter* p2, int32_t* num);
void tscSetupOutputColumnIndex(SSqlObj* pSql);
int32_t tscLaunchSecondSubquery(SSqlObj* pSql);
int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index);
@ -121,7 +121,7 @@ STSBuf* tsBufCreate(bool autoDelete);
STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete);
STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder);
void tsBufDestory(STSBuf* pTSBuf);
void* tsBufDestory(STSBuf* pTSBuf);
void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len);
int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx);

View File

@ -21,17 +21,78 @@ extern "C" {
#endif
#include "taos.h"
#include "taosmsg.h"
#include "tsqldef.h"
#include "ttypes.h"
#include "taosmsg.h"
enum _sql_cmd {
TSDB_SQL_SELECT = 1,
TSDB_SQL_FETCH,
TSDB_SQL_INSERT,
TSDB_SQL_MGMT, // the SQL below is for mgmt node
TSDB_SQL_CREATE_DB,
TSDB_SQL_CREATE_TABLE,
TSDB_SQL_DROP_DB,
TSDB_SQL_DROP_TABLE,
TSDB_SQL_CREATE_ACCT,
TSDB_SQL_CREATE_USER, //10
TSDB_SQL_DROP_ACCT,
TSDB_SQL_DROP_USER,
TSDB_SQL_ALTER_USER,
TSDB_SQL_ALTER_ACCT,
TSDB_SQL_ALTER_TABLE,
TSDB_SQL_ALTER_DB,
TSDB_SQL_CREATE_MNODE,
TSDB_SQL_DROP_MNODE,
TSDB_SQL_CREATE_DNODE,
TSDB_SQL_DROP_DNODE, // 20
TSDB_SQL_CFG_DNODE,
TSDB_SQL_CFG_MNODE,
TSDB_SQL_SHOW,
TSDB_SQL_RETRIEVE,
TSDB_SQL_KILL_QUERY,
TSDB_SQL_KILL_STREAM,
TSDB_SQL_KILL_CONNECTION,
TSDB_SQL_READ, // SQL below is for read operation
TSDB_SQL_CONNECT,
TSDB_SQL_USE_DB, // 30
TSDB_SQL_META,
TSDB_SQL_METRIC,
TSDB_SQL_MULTI_META,
TSDB_SQL_HB,
TSDB_SQL_LOCAL, // SQL below for client local
TSDB_SQL_DESCRIBE_TABLE,
TSDB_SQL_RETRIEVE_METRIC,
TSDB_SQL_METRIC_JOIN_RETRIEVE,
TSDB_SQL_RETRIEVE_TAGS,
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
*/
TSDB_SQL_RETRIEVE_EMPTY_RESULT, //40
TSDB_SQL_RESET_CACHE,
TSDB_SQL_SERV_STATUS,
TSDB_SQL_CURRENT_DB,
TSDB_SQL_SERV_VERSION,
TSDB_SQL_CLI_VERSION,
TSDB_SQL_CURRENT_USER,
TSDB_SQL_CFG_LOCAL,
TSDB_SQL_MAX //48
};
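
Note that TSDB_SQL_MGMT, TSDB_SQL_READ and TSDB_SQL_LOCAL act as range markers as well as values: later in this diff the client classifies commands with comparisons such as pCmd->command > TSDB_SQL_MGMT and pCmd->command < TSDB_SQL_LOCAL. A self-contained toy reduction of that marker pattern (the toy enum, classify() and main() below are invented for illustration and are not part of the client):

#include <stdio.h>

/* Reduced stand-in for enum _sql_cmd above: the *_MGMT, *_READ and *_LOCAL
 * entries are markers, so a command can be classified by simple comparison,
 * just as the async code later in this diff compares against TSDB_SQL_LOCAL. */
enum toy_sql_cmd {
  TOY_SQL_SELECT = 1,
  TOY_SQL_INSERT,
  TOY_SQL_MGMT,       /* the entries listed after this go to the mgmt node */
  TOY_SQL_CREATE_DB,
  TOY_SQL_READ,       /* read operations start here */
  TOY_SQL_CONNECT,
  TOY_SQL_LOCAL,      /* handled entirely on the client from here on */
  TOY_SQL_CURRENT_DB,
  TOY_SQL_MAX
};

static const char *classify(enum toy_sql_cmd c) {
  if (c > TOY_SQL_LOCAL) return "client-local";
  if (c > TOY_SQL_READ)  return "read";
  if (c > TOY_SQL_MGMT)  return "mgmt";
  return "query/insert";
}

int main(void) {
  printf("%s\n", classify(TOY_SQL_INSERT));     /* query/insert */
  printf("%s\n", classify(TOY_SQL_CREATE_DB));  /* mgmt         */
  printf("%s\n", classify(TOY_SQL_CURRENT_DB)); /* client-local */
  return 0;
}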
#define MAX_TOKEN_LEN 30
// token type
enum {
TSQL_NODE_TYPE_EXPR = 0x1,
TSQL_NODE_TYPE_ID = 0x2,
TSQL_NODE_TYPE_VALUE = 0x4,
TSQL_NODE_TYPE_EXPR = 0x1,
TSQL_NODE_TYPE_ID = 0x2,
TSQL_NODE_TYPE_VALUE = 0x4,
};
extern char tTokenTypeSwitcher[13];
@ -72,72 +133,12 @@ typedef struct tFieldList {
TAOS_FIELD *p;
} tFieldList;
// sql operation type
// create table operation type
enum TSQL_TYPE {
TSQL_CREATE_NORMAL_METER = 0x01,
TSQL_CREATE_NORMAL_METRIC = 0x02,
TSQL_CREATE_METER_FROM_METRIC = 0x04,
TSQL_CREATE_STREAM = 0x08,
TSQL_QUERY_METER = 0x10,
TSQL_INSERT = 0x20,
DROP_DNODE = 0x40,
DROP_DATABASE = 0x41,
DROP_TABLE = 0x42,
DROP_USER = 0x43,
DROP_ACCOUNT = 0x44,
USE_DATABASE = 0x50,
// show operation
SHOW_DATABASES = 0x60,
SHOW_TABLES = 0x61,
SHOW_STABLES = 0x62,
SHOW_MNODES = 0x63,
SHOW_DNODES = 0x64,
SHOW_ACCOUNTS = 0x65,
SHOW_USERS = 0x66,
SHOW_VGROUPS = 0x67,
SHOW_QUERIES = 0x68,
SHOW_STREAMS = 0x69,
SHOW_CONFIGS = 0x6a,
SHOW_SCORES = 0x6b,
SHOW_MODULES = 0x6c,
SHOW_CONNECTIONS = 0x6d,
SHOW_GRANTS = 0x6e,
SHOW_VNODES = 0x6f,
// create dnode
CREATE_DNODE = 0x80,
CREATE_DATABASE = 0x81,
CREATE_USER = 0x82,
CREATE_ACCOUNT = 0x83,
DESCRIBE_TABLE = 0x90,
ALTER_USER_PASSWD = 0xA0,
ALTER_USER_PRIVILEGES = 0xA1,
ALTER_DNODE = 0xA2,
ALTER_LOCAL = 0xA3,
ALTER_DATABASE = 0xA4,
ALTER_ACCT = 0xA5,
// reset operation
RESET_QUERY_CACHE = 0xB0,
// alter tags
ALTER_TABLE_TAGS_ADD = 0xC0,
ALTER_TABLE_TAGS_DROP = 0xC1,
ALTER_TABLE_TAGS_CHG = 0xC2,
ALTER_TABLE_TAGS_SET = 0xC4,
// alter table column
ALTER_TABLE_ADD_COLUMN = 0xD0,
ALTER_TABLE_DROP_COLUMN = 0xD1,
KILL_QUERY = 0xD2,
KILL_STREAM = 0xD3,
KILL_CONNECTION = 0xD4,
TSQL_CREATE_TABLE = 0x1,
TSQL_CREATE_STABLE = 0x2,
TSQL_CREATE_TABLE_FROM_STABLE = 0x3,
TSQL_CREATE_STREAM = 0x4,
};
typedef struct SQuerySQL {
@ -157,33 +158,31 @@ typedef struct SQuerySQL {
typedef struct SCreateTableSQL {
struct SSQLToken name; // meter name, create table [meterName] xxx
bool existCheck;
int8_t type; // create normal table/from super table/ stream
struct {
tFieldList *pTagColumns; // for normal table, pTagColumns = NULL;
tFieldList *pColumns;
} colInfo;
struct {
SSQLToken metricName; // metric name, for using clause
SSQLToken stableName; // super table name, for using clause
tVariantList *pTagVals; // create by using metric, tag value
STagData tagdata;
} usingInfo;
SQuerySQL *pSelect;
} SCreateTableSQL;
typedef struct SAlterTableSQL {
SSQLToken name;
int16_t type;
STagData tagData;
tFieldList * pAddColumns;
SSQLToken dropTagToken;
tVariantList *varList; // set t=val or: change src dst
} SAlterTableSQL;
typedef struct SInsertSQL {
SSQLToken name;
struct tSQLExprListList *pValue;
} SInsertSQL;
typedef struct SCreateDBInfo {
SSQLToken dbname;
int32_t replica;
@ -204,41 +203,68 @@ typedef struct SCreateDBInfo {
} SCreateDBInfo;
typedef struct SCreateAcctSQL {
int32_t users;
int32_t dbs;
int32_t tseries;
int32_t streams;
int32_t pps;
int64_t storage;
int64_t qtime;
int32_t conns;
int32_t maxUsers;
int32_t maxDbs;
int32_t maxTimeSeries;
int32_t maxStreams;
int32_t maxPointsPerSecond;
int64_t maxStorage;
int64_t maxQueryTime;
int32_t maxConnections;
SSQLToken stat;
} SCreateAcctSQL;
typedef struct SShowInfo {
uint8_t showType;
SSQLToken prefix;
SSQLToken pattern;
} SShowInfo;
typedef struct SUserInfo {
SSQLToken user;
SSQLToken passwd;
// bool hasPasswd;
SSQLToken privilege;
// bool hasPrivilege;
int16_t type;
} SUserInfo;
typedef struct tDCLSQL {
int32_t nTokens; /* Number of expressions on the list */
int32_t nAlloc; /* Number of entries allocated below */
SSQLToken *a; /* one entry for element */
bool existsCheck;
union {
SCreateDBInfo dbOpt;
SCreateAcctSQL acctOpt;
SShowInfo showOpt;
SSQLToken ip;
};
SUserInfo user;
} tDCLSQL;
typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause
SQuerySQL **pClause;
int32_t numOfClause;
} SSubclauseInfo;
typedef struct SSqlInfo {
int32_t sqlType;
bool validSql;
int32_t type;
bool valid;
union {
SCreateTableSQL *pCreateTableInfo;
SInsertSQL * pInsertInfo;
SAlterTableSQL * pAlterInfo;
SQuerySQL * pQueryInfo;
tDCLSQL * pDCLInfo;
};
char pzErrMsg[256];
SSubclauseInfo subclauseInfo;
char pzErrMsg[256];
} SSqlInfo;
typedef struct tSQLExpr {
@ -338,31 +364,39 @@ SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection,
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName,
tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type);
void tSQLExprDestroy(tSQLExpr *);
void tSQLExprNodeDestroy(tSQLExpr *pExpr);
void tSQLExprNodeDestroy(tSQLExpr *pExpr);
tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr);
SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type);
tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList);
void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList);
void destroyAllSelectClause(SSubclauseInfo *pSql);
void doDestroyQuerySql(SQuerySQL *pSql);
void destroyQuerySql(SQuerySQL *pSql);
SSqlInfo * setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type);
SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo);
void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type);
SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause);
void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists);
void SQLInfoDestroy(SSqlInfo *pInfo);
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...);
void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck);
void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns);
tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken);
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists);
void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo);
void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd);
void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip);
void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege);
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo);
// prefix show db.tables;

View File

@ -120,7 +120,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
void tscDestroyLocalReducer(SSqlObj *pSql);
int32_t tscLocalDoReduce(SSqlObj *pSql);
int32_t tscDoLocalreduce(SSqlObj *pSql);
#ifdef __cplusplus
}

View File

@ -29,9 +29,9 @@ extern "C" {
#include "tsclient.h"
#include "tsdb.h"
#define UTIL_METER_IS_METRIC(metaInfo) \
#define UTIL_METER_IS_SUPERTABLE(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo)))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_SUPERTABLE(metaInfo)))
#define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE))
@ -67,7 +67,7 @@ typedef struct SJoinSubquerySupporter {
} SJoinSubquerySupporter;
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableDataBlocks** dataBlocks);
SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks);
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
@ -81,7 +81,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
void tscFreeUnusedDataBlocks(SDataBlockList* pList);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList);
int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, const char* tableId,
int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta,
STableDataBlocks** dataBlocks);
SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx);
@ -95,23 +95,27 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
* @param pSql sql object
* @return
*/
bool tscIsPointInterpQuery(SSqlCmd* pCmd);
bool tscIsTWAQuery(SSqlCmd* pCmd);
bool tscProjectionQueryOnMetric(SSqlCmd* pCmd);
bool tscProjectionQueryOnTable(SSqlCmd* pCmd);
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo);
bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryOnMetric(SSqlCmd* pCmd);
bool tscQueryMetricTags(SSqlCmd* pCmd);
bool tscQueryMetricTags(SQueryInfo* pQueryInfo);
bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd);
void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
SSchema* pColSchema, int16_t isTag);
void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex);
void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex);
int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex);
void tscClearInterpInfo(SSqlCmd* pCmd);
int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertOrImportData(char* sqlstr);
@ -125,29 +129,33 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE
void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes);
void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible);
void tscFieldInfoCalOffset(SSqlCmd* pCmd);
void tscFieldInfoUpdateOffset(SSqlCmd* pCmd);
void tscFieldInfoCalOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList, int32_t size);
void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst);
void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src);
TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index);
int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index);
int32_t tscGetResRowLength(SSqlCmd* pCmd);
TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscGetResRowLength(SQueryInfo* pQueryInfo);
void tscClearFieldInfo(SFieldInfo* pFieldInfo);
int32_t tscNumOfFields(SQueryInfo* pQueryInfo);
int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize);
SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId);
SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId);
SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index);
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid);
void* tscSqlExprDestroy(SSqlExpr* pExpr);
void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo);
SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex);
SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* colIndex);
void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src);
void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src);
@ -162,7 +170,7 @@ int32_t tscValidateName(SSQLToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId);
bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex);
@ -171,30 +179,38 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str);
void tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);
void tscSetFreeHeatBeat(STscObj* pObj);
bool tscShouldFreeHeatBeat(SSqlObj* pHb);
void tscCleanSqlCmd(SSqlCmd* pCmd);
bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql);
void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache);
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index);
SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index);
void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex);
SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo);
SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index);
void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache);
SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta,
SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta,
int16_t numOfTags, int16_t* tags);
SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd);
SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
void tscFreeSubqueryInfo(SSqlCmd* pCmd);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid);
int tscGetMetricMeta(SSqlObj* pSql);
int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex);
int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists);
void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* keyStr, uint64_t uid);
int tscGetMetricMeta(SSqlObj* pSql, int32_t clauseIndex);
int tscGetMeterMeta(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo);
int tscGetMeterMetaEx(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo, bool createIfNotExists);
void tscResetForNextRetrieve(SSqlRes* pRes);
void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex);
void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex);
void tscDoQuery(SSqlObj* pSql);
/**
@ -215,9 +231,9 @@ void tscDoQuery(SSqlObj* pSql);
* @return
*/
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex);
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid);
@ -226,7 +242,13 @@ TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port,
void sortRemoveDuplicates(STableDataBlocks* dataBuf);
void tscPrintSelectClause(SSqlCmd* pCmd);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)());
#ifdef __cplusplus
}

View File

@ -20,14 +20,6 @@
extern "C" {
#endif
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "os.h"
#include "taos.h"
#include "taosmsg.h"
@ -39,70 +31,9 @@ extern "C" {
#include "tsqlfunction.h"
#include "tutil.h"
#define TSC_GET_RESPTR_BASE(res, cmd, col, ord) \
((res->data + tscFieldInfoGetOffset(cmd, col) * res->numOfRows) + \
(1 - ord.order) * (res->numOfRows - 1) * tscFieldInfoGetField(cmd, col)->bytes)
enum _sql_cmd {
TSDB_SQL_SELECT,
TSDB_SQL_FETCH,
TSDB_SQL_INSERT,
TSDB_SQL_MGMT, // the SQL below is for mgmt node
TSDB_SQL_CREATE_DB,
TSDB_SQL_CREATE_TABLE,
TSDB_SQL_DROP_DB,
TSDB_SQL_DROP_TABLE,
TSDB_SQL_CREATE_ACCT,
TSDB_SQL_CREATE_USER,
TSDB_SQL_DROP_ACCT, // 10
TSDB_SQL_DROP_USER,
TSDB_SQL_ALTER_USER,
TSDB_SQL_ALTER_ACCT,
TSDB_SQL_ALTER_TABLE,
TSDB_SQL_ALTER_DB,
TSDB_SQL_CREATE_MNODE,
TSDB_SQL_DROP_MNODE,
TSDB_SQL_CREATE_DNODE,
TSDB_SQL_DROP_DNODE,
TSDB_SQL_CFG_DNODE, // 20
TSDB_SQL_CFG_MNODE,
TSDB_SQL_SHOW,
TSDB_SQL_RETRIEVE,
TSDB_SQL_KILL_QUERY,
TSDB_SQL_KILL_STREAM,
TSDB_SQL_KILL_CONNECTION,
TSDB_SQL_READ, // SQL below is for read operation
TSDB_SQL_CONNECT,
TSDB_SQL_USE_DB,
TSDB_SQL_META, // 30
TSDB_SQL_METRIC,
TSDB_SQL_MULTI_META,
TSDB_SQL_HB,
TSDB_SQL_LOCAL, // SQL below for client local
TSDB_SQL_DESCRIBE_TABLE,
TSDB_SQL_RETRIEVE_METRIC,
TSDB_SQL_METRIC_JOIN_RETRIEVE,
TSDB_SQL_RETRIEVE_TAGS,
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
*/
TSDB_SQL_RETRIEVE_EMPTY_RESULT,
TSDB_SQL_RESET_CACHE, // 40
TSDB_SQL_SERV_STATUS,
TSDB_SQL_CURRENT_DB,
TSDB_SQL_SERV_VERSION,
TSDB_SQL_CLI_VERSION,
TSDB_SQL_CURRENT_USER,
TSDB_SQL_CFG_LOCAL,
TSDB_SQL_MAX
};
#define TSC_GET_RESPTR_BASE(res, _queryinfo, col, ord) \
(res->data + tscFieldInfoGetOffset(_queryinfo, col) * res->numOfRows)
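
The rewritten macro suggests a column-major result buffer: each column's values for all rows sit in one contiguous block starting at res->data + offset(col) * numOfRows, and tscProcessFetchRow later in this diff adds bytes[col] * row to reach an individual cell. A self-contained toy illustration of that addressing (the column widths and layout model are assumptions made for the sketch, not the client's real metadata):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Toy model of the addressing implied by TSC_GET_RESPTR_BASE: column `col`
   * starts at data + rowOffset[col] * numOfRows, and row `r` of that column
   * sits bytes[col] * r further on. Two invented columns: a timestamp and an
   * int value. Nothing here touches the real client structures. */
  enum { NUM_ROWS = 4, NUM_COLS = 2 };
  int32_t bytes[NUM_COLS]     = { (int32_t)sizeof(int64_t), (int32_t)sizeof(int32_t) };
  int32_t rowOffset[NUM_COLS] = { 0, (int32_t)sizeof(int64_t) };
  char    data[(sizeof(int64_t) + sizeof(int32_t)) * NUM_ROWS];

  for (int col = 0; col < NUM_COLS; ++col) {
    char *base = data + rowOffset[col] * NUM_ROWS;   /* as in the macro */
    for (int row = 0; row < NUM_ROWS; ++row) {
      char *cell = base + bytes[col] * row;          /* as in tscProcessFetchRow */
      printf("col %d row %d -> byte offset %ld\n", col, row, (long)(cell - data));
    }
  }
  return 0;
}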
// forward declaration
struct SSqlInfo;
@ -115,17 +46,17 @@ typedef struct SSqlGroupbyExpr {
} SSqlGroupbyExpr;
typedef struct SMeterMetaInfo {
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
/*
* 1. keep the vnode index during the multi-vnode super table projection query
* 2. keep the vnode index for multi-vnode insertion
*/
int32_t vnodeIndex;
char name[TSDB_METER_ID_LEN + 1]; // table(super table) name
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
int32_t vnodeIndex;
char name[TSDB_METER_ID_LEN + 1]; // table(super table) name
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
} SMeterMetaInfo;
/* the structure for sql function in select clause */
@ -182,13 +113,6 @@ typedef struct SColumnBaseInfo {
struct SLocalReducer;
// todo move to utility
typedef struct SString {
int32_t alloc;
int32_t n;
char * z;
} SString;
typedef struct SCond {
uint64_t uid;
char * cond;
@ -230,24 +154,24 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
char meterId[TSDB_METER_ID_LEN];
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgid; // virtual group id
int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending
int32_t numOfMeters; // number of tables in current submit block
char meterId[TSDB_METER_ID_LEN];
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgid; // virtual group id
int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending
int32_t numOfMeters; // number of tables in current submit block
int32_t rowSize; // row size for current table
int32_t rowSize; // row size for current table
uint32_t nAllocSize;
uint32_t headerSize; // header for metadata (submit metadata)
uint32_t size;
/*
* the metermeta for current table, the metermeta will be used during submit stage, keep a ref
* to avoid it to be removed from cache
*/
SMeterMeta* pMeterMeta;
SMeterMeta *pMeterMeta;
union {
char *filename;
char *pData;
@ -268,53 +192,69 @@ typedef struct SDataBlockList {
STableDataBlocks **pData;
} SDataBlockList;
typedef struct {
SOrderVal order;
int command;
int count; // TODO refactor
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint16_t type; // query/insert/import type
char intervalTimeUnit;
union {
bool existsCheck; // check if the table exists
int8_t showType; // show command type
};
int8_t isParseFinish;
int8_t isInsertFromFile; // load data from file or not
bool import; // import/insert type
uint8_t msgType;
uint16_t type; // query type
char intervalTimeUnit;
int64_t etime, stime;
int64_t nAggTimeInterval; // aggregation time interval
int64_t nSlidingTime; // sliding window in mseconds
SSqlGroupbyExpr groupbyExpr; // group by tags info
/*
* use to keep short request msg and error msg, in such case, SSqlCmd->payload == SSqlCmd->ext;
* create table/query/insert operations will exceed the TSDB_SQLCMD_SIZE.
*
* In such cases, allocate the memory dynamically, and need to free the memory
*/
uint32_t allocSize;
char * payload;
int payloadLen;
short numOfCols;
SColumnBaseInfo colList;
SFieldInfo fieldsInfo;
SSqlExprInfo exprsInfo;
SLimitVal limit;
SLimitVal slimit;
int64_t globalLimit;
STagCond tagCond;
int16_t interpoType; // interpolate type
int16_t numOfTables;
// submit data blocks branched according to vnode
SDataBlockList * pDataBlocks;
SColumnBaseInfo colList;
SFieldInfo fieldsInfo;
SSqlExprInfo exprsInfo;
SLimitVal limit;
SLimitVal slimit;
STagCond tagCond;
SOrderVal order;
int16_t interpoType; // interpolate type
int16_t numOfTables;
SMeterMetaInfo **pMeterInfo;
struct STSBuf * tsBuf;
// todo use dynamic allocated memory for defaultVal
int64_t defaultVal[TSDB_MAX_COLUMNS]; // default value for interpolation
int64_t * defaultVal; // default value for interpolation
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
// offset value in the original sql expression, NOT sent to virtual node, only applied at client side
int64_t prjOffset;
} SQueryInfo;
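
This SQueryInfo struct, the pQueryInfo/numOfClause/clauseIndex fields added to the command structure just below, and the tscGetQueryInfoDetail() helper declared earlier are the core of the refactor: per-subclause state moves out of SSqlCmd so that a UNION query can carry one SQueryInfo per SELECT. A self-contained toy reduction of that ownership pattern (the *Toy types, getQueryInfoDetail() and main() are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the refactored client structures; field names
 * mirror the diff, everything else is a toy reduction, not the real client. */
typedef struct SQueryInfoToy {
  int16_t command;        /* per-subclause command, as the new SQueryInfo keeps it */
  int16_t numOfTables;
} SQueryInfoToy;

typedef struct SSqlCmdToy {
  int32_t         clauseIndex;   /* which subclause is currently being processed */
  int32_t         numOfClause;
  SQueryInfoToy **pQueryInfo;    /* one entry per UNION subclause */
} SSqlCmdToy;

/* toy analogue of tscGetQueryInfoDetail(pCmd, subClauseIndex) */
static SQueryInfoToy *getQueryInfoDetail(SSqlCmdToy *pCmd, int32_t subClauseIndex) {
  if (subClauseIndex < 0 || subClauseIndex >= pCmd->numOfClause) return NULL;
  return pCmd->pQueryInfo[subClauseIndex];
}

int main(void) {
  /* e.g. "SELECT ... UNION ALL SELECT ..." -> two subclauses */
  SSqlCmdToy cmd = { .clauseIndex = 0, .numOfClause = 2, .pQueryInfo = NULL };
  cmd.pQueryInfo = calloc((size_t)cmd.numOfClause, sizeof(*cmd.pQueryInfo));
  for (int32_t i = 0; i < cmd.numOfClause; ++i) {
    cmd.pQueryInfo[i] = calloc(1, sizeof(SQueryInfoToy));
    cmd.pQueryInfo[i]->command = 1;   /* TSDB_SQL_SELECT */
    cmd.pQueryInfo[i]->numOfTables = 1;
  }

  SQueryInfoToy *cur = getQueryInfoDetail(&cmd, cmd.clauseIndex);
  printf("clause %d of %d, command %d\n", (int)cmd.clauseIndex, (int)cmd.numOfClause, (int)cur->command);

  for (int32_t i = 0; i < cmd.numOfClause; ++i) free(cmd.pQueryInfo[i]);
  free(cmd.pQueryInfo);
  return 0;
}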
// data source from sql string or from file
enum {
DATA_FROM_SQL_STRING = 1,
DATA_FROM_DATA_FILE = 2,
};
typedef struct {
int command;
uint8_t msgType;
union {
bool existsCheck; // check if the table exists or not
bool inStream; // denote if current sql is executed in stream or not
bool createOnDemand; // if the table is missing, on-the-fly create it. during getmeterMeta
int8_t dataSourceType; // load data from file or not
};
union {
int32_t count;
int32_t numOfTablesInSubmit;
};
int32_t clauseIndex; // index of multiple subclause query
int8_t isParseFinish;
short numOfCols;
uint32_t allocSize;
char * payload;
int payloadLen;
SQueryInfo **pQueryInfo;
int32_t numOfClause;
// submit data blocks branched according to vnode
SDataBlockList *pDataBlocks;
// for parameter ('?') binding and batch processing
int32_t batchSize;
@ -330,8 +270,10 @@ struct STSBuf;
typedef struct {
uint8_t code;
int numOfRows; // num of results in current retrieved
int numOfTotal; // num of total results
int64_t numOfRows; // num of results in current retrieved
int64_t numOfTotal; // num of total results
int64_t numOfTotalInCurrentClause; // num of total result in current subclause
char * pRsp;
int rspType;
int rspLen;
@ -394,9 +336,9 @@ typedef struct _sql_obj {
tsem_t emptyRspSem;
SSqlCmd cmd;
SSqlRes res;
uint16_t numOfSubs;
char* asyncTblPos;
void* pTableHashList;
uint8_t numOfSubs;
char * asyncTblPos;
void * pTableHashList;
struct _sql_obj **pSubs;
struct _sql_obj * prev, *next;
} SSqlObj;
@ -436,9 +378,11 @@ typedef struct {
} SIpStrList;
// tscSql API
int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion);
int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion);
void tscInitMsgs();
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
void tscInitMsgs();
void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle);
int tscProcessSql(SSqlObj *pSql);
@ -457,15 +401,16 @@ int taos_retrieve(TAOS_RES *res);
* transfer function for metric query in stream computing, the function need to be change
* before send query message to vnode
*/
int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd);
void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd);
int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFunctionForMetricQuery(SQueryInfo *pQueryInfo);
void tscClearSqlMetaInfoForce(SSqlCmd *pCmd);
int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
void tscFreeSqlCmdData(SSqlCmd *pCmd);
void tscFreeResData(SSqlObj* pSql);
/**
* free query result of the sql object
@ -489,12 +434,14 @@ void tscFreeSqlObj(SSqlObj *pObj);
void tscCloseTscObj(STscObj *pObj);
void tscProcessMultiVnodesInsert(SSqlObj *pSql);
void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql);
void tscKillMetricQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscHasReachLimitation(SSqlObj* pSql);
void tscProcessMultiVnodesInsert(SSqlObj *pSql);
void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql);
void tscKillMetricQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
@ -516,6 +463,8 @@ extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern SIpStrList tscMgmtIpList;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows);
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large.

View File

@ -17,6 +17,7 @@
#include "taosmsg.h"
#include "tast.h"
#include "tlog.h"
#include "tscSQLParser.h"
#include "tscSyntaxtreefunction.h"
#include "tschemautil.h"
#include "tsdb.h"
@ -26,7 +27,6 @@
#include "tstoken.h"
#include "ttypes.h"
#include "tutil.h"
#include "tscSQLParser.h"
/*
*
@ -115,6 +115,9 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols,
int32_t i = 0;
if (pToken->type == TK_ID) {
do {
SSQLToken tableToken = {0};
extractTableNameFromToken(pToken, &tableToken);
size_t len = strlen(pSchema[i].name);
if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break;
} while (++i < numOfCols);
@ -268,7 +271,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
}
// get the operator of expr
uint8_t optr = getBinaryExprOptr(&t0);
uint8_t optr = getBinaryExprOptr(&t0);
if (optr == 0) {
pError("not support binary operator:%d", t0.type);
tSQLSyntaxNodeDestroy(pLeft, NULL);
@ -323,7 +326,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
pn->colId = -1;
return pn;
} else {
uint8_t localOptr = getBinaryExprOptr(&t0);
uint8_t localOptr = getBinaryExprOptr(&t0);
if (localOptr == 0) {
pError("not support binary operator:%d", t0.type);
free(pBinExpr);
@ -419,17 +422,17 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) {
if (pExpr == NULL) {
*dst = 0;
*len = 0;
return;
return;
}
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
dst += lhs;
*len = lhs;
char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst);
char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst);
*len += (start - dst);
*len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType);
*len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType);
}
static void UNUSED_FUNC destroySyntaxTree(tSQLSyntaxNode *pNode) { tSQLSyntaxNodeDestroy(pNode, NULL); }
@ -645,7 +648,7 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults
/*
* traverse the result and apply the function to each item to check if the item is qualified or not
*/
static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) {
static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) {
assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE);
// brutal force scan the result list and check for each item in the list

View File

@ -26,19 +26,18 @@
#include "tutil.h"
#include "tnote.h"
void tscProcessFetchRow(SSchedMsg *pMsg);
void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessFetchRow(SSchedMsg *pMsg);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)());
/*
* proxy function to perform sequentially query&retrieve operation.
* If sql queries upon metric and two-stage merge procedure is not needed,
* it will sequentially query&retrieve data for all vnodes in pCmd->pMetricMeta
* Proxy function to perform the sequential query & retrieve operation.
* If the SQL queries a super table and the two-stage merge procedure is not involved (i.e., a projection
* query), it sequentially queries & retrieves data from all vnodes.
*/
static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
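
From the application side, the proxy chain described in the comment above is driven through the public async API: taos_query_a() delivers the query callback and taos_fetch_rows_a() keeps pulling batches until numOfRows reaches zero, at which point the client internally walks the remaining vnodes/subclauses. A hedged usage sketch (the connect parameters, database and table name are illustrative, and calling taos_fetch_row() inside the fetch callback follows the usual TDengine async pattern rather than anything shown in this diff):

#include <stdio.h>
#include "taos.h"   /* TDengine client header, as used throughout this diff */

static void fetch_cb(void *param, TAOS_RES *res, int numOfRows);

/* query callback: invoked once the statement has been processed */
static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code != 0) {
    fprintf(stderr, "query failed, code:%d\n", code);
    return;
  }
  taos_fetch_rows_a(res, fetch_cb, param);     /* start retrieving */
}

/* fetch callback: called repeatedly; numOfRows == 0 means no more data */
static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows <= 0) {                        /* done, or an error if negative */
    taos_free_result(res);
    return;
  }
  for (int i = 0; i < numOfRows; ++i) {
    TAOS_ROW row = taos_fetch_row(res);        /* consume one row of the batch */
    (void)row;
  }
  taos_fetch_rows_a(res, fetch_cb, param);     /* ask for the next batch */
}

int main(void) {
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;
  taos_query_a(conn, "select * from st1", query_cb, NULL);
  getchar();                                   /* keep the process alive for the callbacks */
  taos_close(conn);
  return 0;
}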
// TODO return the correct error code to client in tscQueueAsyncError
void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) {
@ -81,7 +80,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
return;
}
pSql->sqlstr = malloc(sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
@ -97,7 +95,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
strtolower(pSql->sqlstr, sqlstr);
tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr);
int32_t code = tsParseSql(pSql, pObj->acctId, pObj->db, true);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code != TSDB_CODE_SUCCESS) {
@ -109,7 +107,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
tscDoQuery(pSql);
}
static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
if (tres == NULL) {
return;
}
@ -118,36 +116,32 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
// sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx
if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) {
// vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
if (numOfRows == 0) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
} else {
/*
* all available virtual node has been checked already, now we need to check
* for the next subclause queries
*/
if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
return;
}
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
(*pSql->fetchFp)(param, tres, 0);
return;
}
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
if ((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex);
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncRetrieveNextVnode;
tscProcessSql(pSql);
return;
}
} else { // localreducer has handle this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
/*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
*/
(*pSql->fetchFp)(param, pSql, 0);
}
return;
}
// local reducer has handle this situation during super table non-projection query.
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
(*pSql->fetchFp)(param, tres, numOfRows);
@ -164,7 +158,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 || numOfRows != 0) {
if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) {
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
} else {
@ -183,14 +177,18 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
}
/*
* retrieve callback for fetch rows proxy. It serves as the callback function of querying vnode
* retrieve callback for fetch rows proxy.
* The below two functions both serve as the callback function of query virtual node.
* query callback first, and then followed by retrieve callback
*/
static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncFetchRowsProxy);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
}
static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncRetrieve);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
}
void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) {
@ -213,7 +211,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
// user-defined callback function is stored in fetchFp
pSql->fetchFp = fp;
pSql->fp = tscProcessAsyncFetchRowsProxy;
pSql->fp = tscAsyncFetchRowsProxy;
pSql->param = param;
tscResetForNextRetrieve(pRes);
@ -245,11 +243,15 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
pSql->fetchFp = fp;
pSql->param = param;
if (pRes->row >= pRes->numOfRows) {
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncRetrieve;
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
pSql->fp = tscAsyncFetchSingleRowProxy;
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
} else {
SSchedMsg schedMsg;
@ -261,58 +263,45 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
}
}
void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
SSqlObj *pSql = (SSqlObj *)tres;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (numOfRows == 0) {
// sequentially retrieve data from remain vnodes.
if (tscProjectionQueryOnMetric(pCmd)) {
/*
* vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved
*/
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
(*pSql->fetchFp)(pSql->param, pSql, NULL);
return;
}
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncContinueRetrieve;
tscProcessSql(pSql);
return;
}
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode);
} else {
/*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
*/
(*pSql->fetchFp)(pSql->param, pSql, NULL);
}
} else {
for (int i = 0; i < pCmd->numOfCols; ++i)
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
return;
}
for (int i = 0; i < pCmd->numOfCols; ++i)
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
}
void tscProcessFetchRow(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < pCmd->numOfCols; ++i)
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
for (int i = 0; i < pCmd->numOfCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow);
}
@ -371,7 +360,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) {
tscTrace("%p SqlObj is freed, not add into queue async res", pSql);
return;
} else {
tscTrace("%p add into queued async res, code:%d", pSql, pSql->res.code);
tscError("%p add into queued async res, code:%d", pSql, pSql->res.code);
}
SSchedMsg schedMsg;
@ -404,10 +393,13 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
SSqlCmd *pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
assert(!pCmd->isInsertFromFile && pSql->signature == pSql);
assert(pCmd->dataSourceType != 0 && pSql->signature == pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pCmd->numOfTables == 1);
int32_t index = 0;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2);
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) {
@ -444,7 +436,6 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj *pSql = (SSqlObj *)param;
if (pSql == NULL || pSql->signature != pSql) return;
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -464,10 +455,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
} else {
tscTrace("%p renew meterMeta successfully, command:%d, code:%d, thandle:%p, retry:%d",
pSql, pSql->cmd.command, pSql->res.code, pSql->thandle, pSql->retry);
assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta == NULL);
tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
assert(pMeterMetaInfo->pMeterMeta == NULL);
tscGetMeterMeta(pSql, pMeterMetaInfo);
code = tscSendMsgToServer(pSql);
if (code != 0) {
pRes->code = code;
@ -485,24 +477,27 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
}
if (pSql->pStream == NULL) {
// check if it is a sub-query of metric query first, if true, enter another routine
if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// check if it is a sub-query of super table query first, if true, enter another routine
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL);
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSqlObj;
assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex &&
pMeterMetaInfo->pMeterMeta->numOfTags != 0);
tscTrace("%p get metricMeta during metric query successfully", pSql);
code = tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0);
tscTrace("%p get metricMeta during super table query successfully", pSql);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
code = tscGetMetricMeta(pSql);
code = tscGetMetricMeta(pSql, 0);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
@ -510,8 +505,8 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->isParseFinish) {
tscTrace("%p resend data to vnode in metermeta callback since sql has been parsed completed", pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
assert(code == TSDB_CODE_SUCCESS);
if (pMeterMetaInfo->pMeterMeta) {
@ -519,28 +514,28 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
if (code == TSDB_CODE_SUCCESS) return;
}
} else {
code = tsParseSql(pSql, pObj->acctId, pObj->db, false);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
}
} else { // stream computing
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql);
if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql, pCmd->clauseIndex);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
}
if (code != 0) {
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
return;
}
@ -549,10 +544,12 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command);
/*
* NOTE:
* transfer the sql function for metric query before get meter/metric meta,
* transfer the sql function for super table query before get meter/metric meta,
* since in callback functions, only tscProcessSql(pStream->pSql) is executed!
*/
tscTansformSQLFunctionForMetricQuery(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tscTansformSQLFunctionForSTableQuery(pQueryInfo);
tscIncStreamExecutionCount(pSql->pStream);
} else {
tscTrace("%p get meterMeta/metricMeta successfully", pSql);

View File

@ -72,6 +72,8 @@ for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \
void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {}
void doFinalizer(SQLFunctionCtx *pCtx) { resetResultInfo(GET_RES_INFO(pCtx)); }
typedef struct tValuePair {
tVariant v;
int64_t timestamp;
@ -355,8 +357,8 @@ static void function_finalizer(SQLFunctionCtx *pCtx) {
pTrace("no result generated, result is set to NULL");
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
}
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
/*
@ -889,6 +891,7 @@ static void avg_finalizer(SQLFunctionCtx *pCtx) {
// cannot set the numOfIteratedElems again since it is set during previous iteration
GET_RES_INFO(pCtx)->numOfRes = 1;
doFinalizer(pCtx);
}
/////////////////////////////////////////////////////////////////////////////////////////////
@ -909,7 +912,17 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
tval = &pCtx->preAggVals.max;
index = pCtx->preAggVals.maxIndex;
}
/**
* NOTE: works around a bug caused by an invalid pre-calculated function.
* Here, selectivity + ts would not return the correct value.
*
* The following three lines will be removed later.
*/
if (index < 0 || index >= pCtx->size + pCtx->startOffset) {
index = 0;
}
TSKEY key = pCtx->ptsList[index];
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
@ -1423,8 +1436,8 @@ static void stddev_finalizer(SQLFunctionCtx *pCtx) {
*retValue = sqrt(pStd->res / pStd->num);
SET_VAL(pCtx, 1, 1);
}
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
//////////////////////////////////////////////////////////////////////////////////////
@ -1456,7 +1469,9 @@ static void first_function(SQLFunctionCtx *pCtx) {
}
memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes);
DO_UPDATE_TAG_COLUMNS(pCtx, i);
TSKEY k = pCtx->ptsList[i];
DO_UPDATE_TAG_COLUMNS(pCtx, k);
SResultInfo *pInfo = GET_RES_INFO(pCtx);
pInfo->hasResult = DATA_SET_FLAG;
@ -1824,7 +1839,7 @@ static void last_row_finalizer(SQLFunctionCtx *pCtx) {
}
GET_RES_INFO(pCtx)->numOfRes = 1;
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
//////////////////////////////////////////////////////////////////////////////////
@ -2005,15 +2020,8 @@ static void copyTopBotRes(SQLFunctionCtx *pCtx, int32_t type) {
STopBotInfo *pRes = pResInfo->interResultBuf;
tValuePair **tvp = pRes->res;
int32_t step = 0;
// in case of second stage merge, always use incremental output.
if (pCtx->currentStage == SECONDARY_STAGE_MERGE) {
step = QUERY_ASC_FORWARD_STEP;
} else {
step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
}
int32_t step = QUERY_ASC_FORWARD_STEP;
int32_t len = GET_RES_INFO(pCtx)->numOfRes;
switch (type) {
@ -2392,8 +2400,8 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
GET_TRUE_DATA_TYPE();
copyTopBotRes(pCtx, type);
resetResultInfo(pResInfo);
doFinalizer(pCtx);
}
///////////////////////////////////////////////////////////////////////////////////////////////
@ -2469,8 +2477,8 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
tOrderDescDestroy(pMemBucket->pOrderDesc);
tMemBucketDestroy(pMemBucket);
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
//////////////////////////////////////////////////////////////////////////////////
@ -2678,8 +2686,8 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) {
return;
}
}
resetResultInfo(pResInfo);
doFinalizer(pCtx);
}
/////////////////////////////////////////////////////////////////////////////////
@ -2859,7 +2867,7 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
param[1][2] /= param[1][1];
sprintf(pCtx->aOutputBuf, "(%lf, %lf)", param[0][2], param[1][2]);
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
static void date_col_output_function(SQLFunctionCtx *pCtx) {
@ -2878,17 +2886,17 @@ static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_
static void col_project_function(SQLFunctionCtx *pCtx) {
INC_INIT_VAL(pCtx, pCtx->size);
char *pDest = 0;
char *pData = GET_INPUT_CHAR(pCtx);
if (pCtx->order == TSQL_SO_ASC) {
pDest = pCtx->aOutputBuf;
memcpy(pCtx->aOutputBuf, pData, (size_t)pCtx->size * pCtx->inputBytes);
} else {
pDest = pCtx->aOutputBuf - (pCtx->size - 1) * pCtx->inputBytes;
for(int32_t i = 0; i < pCtx->size; ++i) {
memcpy(pCtx->aOutputBuf + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes,
pCtx->inputBytes);
}
}
char *pData = GET_INPUT_CHAR(pCtx);
memcpy(pDest, pData, (size_t)pCtx->size * pCtx->inputBytes);
pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
pCtx->aOutputBuf += pCtx->size * pCtx->outputBytes;
}
static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
@ -2903,7 +2911,7 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
char *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes);
pCtx->aOutputBuf += pCtx->inputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
pCtx->aOutputBuf += pCtx->inputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
}
/**
@ -2915,18 +2923,17 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
INC_INIT_VAL(pCtx, pCtx->size);
assert(pCtx->inputBytes == pCtx->outputBytes);
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
for (int32_t i = 0; i < pCtx->size; ++i) {
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->outputType);
pCtx->aOutputBuf += pCtx->outputBytes * factor;
pCtx->aOutputBuf += pCtx->outputBytes;
}
}
static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
INC_INIT_VAL(pCtx, 1);
tVariantDump(&pCtx->tag, pCtx->aOutputBuf, pCtx->tag.nType);
pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
pCtx->aOutputBuf += pCtx->outputBytes;
}
/**
@ -2975,8 +2982,8 @@ static void diff_function(SQLFunctionCtx *pCtx) {
int32_t notNullElems = 0;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t i = (pCtx->order == TSQL_SO_ASC) ? 0 : pCtx->size - 1;
TSKEY * pTimestamp = pCtx->ptsOutputBuf;
switch (pCtx->inputType) {
@ -2996,14 +3003,14 @@ static void diff_function(SQLFunctionCtx *pCtx) {
*pOutput = pData[i] - pCtx->param[1].i64Key;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
pCtx->param[1].i64Key = pData[i];
@ -3028,14 +3035,14 @@ static void diff_function(SQLFunctionCtx *pCtx) {
*pOutput = pData[i] - pCtx->param[1].i64Key;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
pCtx->param[1].i64Key = pData[i];
@ -3059,13 +3066,13 @@ static void diff_function(SQLFunctionCtx *pCtx) {
} else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) {
*pOutput = pData[i] - pCtx->param[1].dKey;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
pCtx->param[1].dKey = pData[i];
@ -3089,13 +3096,15 @@ static void diff_function(SQLFunctionCtx *pCtx) {
} else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) {
*pOutput = pData[i] - pCtx->param[1].dKey;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
// keep the last value, the remain may be all null
@ -3120,13 +3129,14 @@ static void diff_function(SQLFunctionCtx *pCtx) {
} else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) {
*pOutput = pData[i] - pCtx->param[1].i64Key;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
pCtx->param[1].i64Key = pData[i];
@ -3150,13 +3160,15 @@ static void diff_function(SQLFunctionCtx *pCtx) {
} else if ((i == 0 && pCtx->order == TSQL_SO_ASC) || (i == pCtx->size - 1 && pCtx->order == TSQL_SO_DESC)) {
*pOutput = pData[i] - pCtx->param[1].i64Key;
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
} else {
*pOutput = pData[i] - pData[i - step];
*pTimestamp = pCtx->ptsList[i];
pOutput += step;
pTimestamp += step;
pOutput += 1;
pTimestamp += 1;
}
pCtx->param[1].i64Key = pData[i];
@ -3181,8 +3193,8 @@ static void diff_function(SQLFunctionCtx *pCtx) {
GET_RES_INFO(pCtx)->numOfRes += forwardStep;
pCtx->aOutputBuf = pCtx->aOutputBuf + forwardStep * pCtx->outputBytes * step;
pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + forwardStep * TSDB_KEYSIZE * step;
pCtx->aOutputBuf += forwardStep * pCtx->outputBytes;
pCtx->ptsOutputBuf += forwardStep * TSDB_KEYSIZE;
}
}
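/*
 * Illustrative sketch (not part of the original patch, hypothetical names): the diff_function
 * hunks above make the output cursors (pOutput/pTimestamp/aOutputBuf) always advance by +1,
 * while only the input index moves by the scan direction. A minimal stand-alone version of
 * that pattern over a plain int array:
 */
#include <stdio.h>

/* first difference of in[0..n-1]; asc selects the scan direction, results are packed forward */
static int diff_i32(const int *in, int n, int asc, int *out) {
  int step = asc ? 1 : -1;
  int i = asc ? 0 : n - 1;
  int written = 0;
  for (int k = 0; k < n; ++k, i += step) {
    if (k == 0) {
      continue;                               /* first element in scan order has no predecessor */
    }
    out[written++] = in[i] - in[i - step];    /* output advances by +1, never by step */
  }
  return written;
}

int main() {
  int v[] = {3, 7, 12, 20};
  int d[3];
  int n = diff_i32(v, 4, 1, d);
  for (int k = 0; k < n; ++k) printf("%d ", d[k]);   /* prints: 4 5 8 */
  printf("\n");
  return 0;
}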
@ -3209,7 +3221,7 @@ static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) {
GET_RES_INFO(pCtx)->numOfRes += 1;
}
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_INT: {
@ -3277,7 +3289,8 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order,
arithmetic_callback_function);
pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
pCtx->param[1].pz = NULL;
}
static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
@ -3288,7 +3301,7 @@ static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, 1, pCtx->aOutputBuf, sas, pCtx->order,
arithmetic_callback_function);
pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
pCtx->aOutputBuf += pCtx->outputBytes/* * GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/;
}
#define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \
@ -3504,7 +3517,6 @@ void spread_func_sec_merge(SQLFunctionCtx *pCtx) {
pCtx->param[3].dKey = pData->max;
}
// pCtx->numOfIteratedElems += 1;
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
}
@ -3536,9 +3548,8 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) {
*(double *)pCtx->aOutputBuf = pInfo->max - pInfo->min;
}
// SET_VAL(pCtx, pCtx->numOfIteratedElems, 1);
resetResultInfo(GET_RES_INFO(pCtx));
GET_RES_INFO(pCtx)->numOfRes = 1; // todo add test case
}
/*
@ -4171,7 +4182,7 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) {
}
GET_RES_INFO(pCtx)->numOfRes = 1;
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
/**
@ -4333,7 +4344,7 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) {
strcpy(pCtx->aOutputBuf, pTSbuf->path);
tsBufDestory(pTSbuf);
resetResultInfo(GET_RES_INFO(pCtx));
doFinalizer(pCtx);
}
/*
@ -4373,7 +4384,7 @@ SQLAggFuncElem aAggs[28] = {{
count_function,
count_function_f,
no_next_step,
noop1,
doFinalizer,
count_func_merge,
count_func_merge,
count_load_data_info,
@ -4616,7 +4627,7 @@ SQLAggFuncElem aAggs[28] = {{
date_col_output_function,
date_col_output_function_f,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
no_data_info,
@ -4631,7 +4642,7 @@ SQLAggFuncElem aAggs[28] = {{
noop1,
noop2,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
data_req_load_info,
@ -4646,7 +4657,7 @@ SQLAggFuncElem aAggs[28] = {{
tag_function,
noop2,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
no_data_info,
@ -4676,7 +4687,7 @@ SQLAggFuncElem aAggs[28] = {{
tag_function,
tag_function_f,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
no_data_info,
@ -4691,7 +4702,7 @@ SQLAggFuncElem aAggs[28] = {{
col_project_function,
col_project_function_f,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
data_req_load_info,
@ -4706,7 +4717,7 @@ SQLAggFuncElem aAggs[28] = {{
tag_project_function,
tag_project_function_f,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
no_data_info,
@ -4721,7 +4732,7 @@ SQLAggFuncElem aAggs[28] = {{
arithmetic_function,
arithmetic_function_f,
no_next_step,
noop1,
doFinalizer,
copy_function,
copy_function,
data_req_load_info,
@ -4736,7 +4747,7 @@ SQLAggFuncElem aAggs[28] = {{
diff_function,
diff_function_f,
no_next_step,
noop1,
doFinalizer,
noop1,
noop1,
data_req_load_info,
@ -4782,7 +4793,7 @@ SQLAggFuncElem aAggs[28] = {{
interp_function,
do_sum_f, // todo filter handle
no_next_step,
noop1,
doFinalizer,
noop1,
copy_function,
no_data_info,

View File

@ -22,20 +22,7 @@
#include "ttime.h"
#include "tutil.h"
static UNUSED_FUNC bool isSubqueryCompleted(SSqlObj* pSql) {
bool hasData = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes* pRes = &pSql->pSubs[i]->res;
// in case inner join, if any subquery exhausted, query completed
if (pRes->numOfRows == 0) {
hasData = false;
break;
}
}
return hasData;
}
static void freeSubqueryObj(SSqlObj* pSql);
static bool doCompare(int32_t order, int64_t left, int64_t right) {
if (order == TSQL_SO_ASC) {
@ -53,11 +40,16 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
*st = INT64_MAX;
*et = INT64_MIN;
SLimitVal* pLimit = &pSql->cmd.limit;
int32_t order = pSql->cmd.order.order;
pSql->pSubs[0]->cmd.tsBuf = output1;
pSql->pSubs[1]->cmd.tsBuf = output2;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
SLimitVal* pLimit = &pQueryInfo->limit;
int32_t order = pQueryInfo->order.order;
SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0);
SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0);
pSubQueryInfo1->tsBuf = output1;
pSubQueryInfo2->tsBuf = output2;
tsBufResetPos(pSupporter1->pTSBuf);
tsBufResetPos(pSupporter2->pTSBuf);
@ -104,16 +96,19 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
numOfInput2++;
} else {
if (*st > elem1.ts) {
*st = elem1.ts;
}
if (*et < elem1.ts) {
*et = elem1.ts;
}
// in case of stable query, limit/offset is not applied here
if (pLimit->offset == 0 || pSql->cmd.nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pSql->cmd.type)) {
/*
       * in case of super table (stable) query, limit/offset is not applied here. the limit/offset is applied to the
       * final results, which are acquired after the secondary merge in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (*st > elem1.ts) {
*st = elem1.ts;
}
if (*et < elem1.ts) {
*et = elem1.ts;
}
tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
} else {
@ -150,15 +145,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
tsBufDestory(pSupporter1->pTSBuf);
tsBufDestory(pSupporter2->pTSBuf);
tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks intersecting", pSql,
numOfInput1, numOfInput2, output1->numOfTotal);
tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql,
numOfInput1, numOfInput2, output1->numOfTotal, *st, *et);
return output1->numOfTotal;
}
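/*
 * Illustrative sketch (not part of the original patch, hypothetical names): doTSBlockIntersect
 * walks the two sorted timestamp streams in lock-step, keeps only the timestamps present in
 * both, and tracks the resulting window [st, et] that is later used to narrow the second-phase
 * query. The same two-pointer idea over plain arrays:
 */
#include <stdint.h>
#include <stdio.h>

static int intersect_ts(const int64_t *a, int na, const int64_t *b, int nb,
                        int64_t *out, int64_t *st, int64_t *et) {
  int i = 0, j = 0, n = 0;
  *st = INT64_MAX;
  *et = INT64_MIN;
  while (i < na && j < nb) {
    if (a[i] < b[j]) {
      ++i;                       /* unmatched timestamp in the first stream, drop it */
    } else if (a[i] > b[j]) {
      ++j;                       /* unmatched timestamp in the second stream, drop it */
    } else {                     /* common timestamp: extend the window and keep it */
      if (a[i] < *st) *st = a[i];
      if (a[i] > *et) *et = a[i];
      out[n++] = a[i];
      ++i;
      ++j;
    }
  }
  return n;
}

int main() {
  int64_t a[] = {1, 3, 5, 7}, b[] = {3, 4, 5, 8}, out[4], st, et;
  int n = intersect_ts(a, 4, b, 4, out, &st, &et);
  printf("%d common ts, window [%lld, %lld]\n", n, (long long)st, (long long)et);  /* 2, [3, 5] */
  return 0;
}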
// todo handle failed to create sub query
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState,
/*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) {
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index) {
SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter));
if (pSupporter == NULL) {
return NULL;
@ -168,11 +163,15 @@ SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pS
pSupporter->pState = pState;
pSupporter->subqueryIndex = index;
pSupporter->interval = pSql->cmd.nAggTimeInterval;
pSupporter->limit = pSql->cmd.limit;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pSupporter->interval = pQueryInfo->nAggTimeInterval;
pSupporter->limit = pQueryInfo->limit;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, index);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, index);
pSupporter->uid = pMeterMetaInfo->pMeterMeta->uid;
assert (pSupporter->uid != 0);
getTmpfilePath("join-", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
@ -189,7 +188,7 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) {
return;
}
tfree(pSupporter->exprsInfo.pExprs);
tscSqlExprInfoDestroy(&pSupporter->exprsInfo);
tscColumnBaseInfoDestroy(&pSupporter->colList);
tscClearFieldInfo(&pSupporter->fieldsInfo);
@ -209,10 +208,9 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) {
 * primary timestamp column, the secondary query is not necessary
*
*/
bool needSecondaryQuery(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) {
SColumnBase* pBase = tscColumnBaseInfoGet(&pCmd->colList, i);
bool needSecondaryQuery(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) {
SColumnBase* pBase = tscColumnBaseInfoGet(&pQueryInfo->colList, i);
if (pBase->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return true;
}
@ -224,110 +222,147 @@ bool needSecondaryQuery(SSqlObj* pSql) {
/*
 * launch the second-stage query to fetch the rows whose timestamps are in the intersected set
*/
int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) {
int32_t numOfSub = 0;
SJoinSubquerySupporter* pSupporter = NULL;
/*
* If the columns are not involved in the final select clause, the secondary query will not be launched
* for the subquery.
*/
SSubqueryState* pState = NULL;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
pSupporter = pSql->pSubs[i]->param;
pSupporter->pState->numOfCompleted = 0;
/*
* If the columns are not involved in the final select clause, the secondary query will not be launched
* for the subquery.
*/
if (pSupporter->exprsInfo.numOfExprs > 0) {
++numOfSub;
}
}
assert(numOfSub > 0);
  // scan all subqueries; if a subquery only outputs the ts column, ignore it
tscTrace(
"%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in "
"select clause",
pSql, pSql->numOfSubs, numOfSub);
tscTrace("%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in "
"select clause", pSql, pSql->numOfSubs, numOfSub);
int32_t j = 0;
/*
   * the subqueries that do not actually launch the secondary query to the virtual node are marked as completed.
*/
pState = pSupporter->pState;
pState->numOfTotal = pSql->numOfSubs;
pState->numOfCompleted = (pSql->numOfSubs - numOfSub);
bool success = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
pSupporter = pSub->param;
pSupporter->pState->numOfTotal = numOfSub;
SSqlObj *pPrevSub = pSql->pSubs[i];
pSql->pSubs[i] = NULL;
pSupporter = pPrevSub->param;
if (pSupporter->exprsInfo.numOfExprs == 0) {
tscTrace("%p subIndex: %d, not need to launch query, ignore it", pSql, i);
tscDestroyJoinSupporter(pSupporter);
taos_free_result(pSub);
tscFreeSqlObj(pPrevSub);
pSql->pSubs[i] = NULL;
continue;
}
SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL);
SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0);
STSBuf *pTSBuf = pSubQueryInfo->tsBuf;
pSubQueryInfo->tsBuf = NULL;
    // freeing the result of the async object will also free the sqlObj
taos_free_result(pPrevSub);
SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, NULL);
if (pNew == NULL) {
pSql->numOfSubs = i; // revise the number of subquery
pSupporter->pState->numOfTotal = i;
pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscDestroyJoinSupporter(pSupporter);
return 0;
success = false;
break;
}
tscFreeSqlCmdData(&pNew->cmd);
pSql->pSubs[j++] = pNew;
pNew->cmd.tsBuf = pSub->cmd.tsBuf;
pSub->cmd.tsBuf = NULL;
taos_free_result(pSub);
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object
// set the second stage sub query for join process
pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE;
pNew->cmd.nAggTimeInterval = pSupporter->interval;
pNew->cmd.groupbyExpr = pSupporter->groupbyExpr;
tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0);
tscTagCondCopy(&pNew->cmd.tagCond, &pSupporter->tagCond);
tscSqlExprCopy(&pNew->cmd.exprsInfo, &pSupporter->exprsInfo, pSupporter->uid);
tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNew->cmd.fieldsInfo);
// add the ts function for interval query if it is missing
if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS && pNew->cmd.nAggTimeInterval > 0) {
tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0);
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE;
pQueryInfo->nAggTimeInterval = pSupporter->interval;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
tscColumnBaseInfoCopy(&pQueryInfo->colList, &pSupporter->colList, 0);
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
tscSqlExprCopy(&pQueryInfo->exprsInfo, &pSupporter->exprsInfo, pSupporter->uid);
tscFieldInfoCopyAll(&pQueryInfo->fieldsInfo, &pSupporter->fieldsInfo);
/*
     * if the first column of the secondary query is not the ts function, add it,
     * because this column is required to filter the results by timestamp after the intersection.
*/
if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS) {
tscAddTimestampColumn(pQueryInfo, TSDB_FUNC_TS, 0);
}
// todo refactor function name
tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0);
tscFieldInfoCalOffset(&pNew->cmd);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
assert(pNew->numOfSubs == 0 && pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
tscFieldInfoCalOffset(pNewQueryInfo);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0);
/*
     * For the projection query in a table-table join, the offset value may have been modified
     * during the timestamp intersection, so the current limit/offset is carried over here.
*/
pSupporter->limit = pSql->cmd.limit;
pNew->cmd.limit = pSupporter->limit;
pSupporter->limit = pQueryInfo->limit;
pNewQueryInfo->limit = pSupporter->limit;
// fetch the join tag column
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0);
assert(pNew->cmd.tagCond.joinInfo.hasJoin);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid);
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0);
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param[0].i64Key = tagColIndex;
pExpr->numOfParams = 1;
}
#ifdef _DEBUG_VIEW
tscPrintSelectClause(&pNew->cmd);
#endif
tscProcessSql(pNew);
tscPrintSelectClause(pNew, 0);
tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s",
pSql, pNew, 0, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type,
pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols,
pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name);
}
  // failed to prepare the subquery objects, abort
if (!success) {
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql,
pSql->numOfSubs, pSql->res.code);
freeSubqueryObj(pSql);
return pSql->res.code;
}
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if (pSub == NULL) {
continue;
}
tscProcessSql(pSub);
}
// revise the number of subs
pSql->numOfSubs = j;
return 0;
return TSDB_CODE_SUCCESS;
}
static void freeSubqueryObj(SSqlObj* pSql) {
@ -360,7 +395,10 @@ static void doQuitSubquery(SSqlObj* pParentSql) {
}
static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) {
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
pSqlObj->res.code = abs(pSupporter->pState->code);
tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code);
@ -369,11 +407,11 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter
}
// update the query time range according to the join results on timestamp
static void updateQueryTimeRange(SSqlObj* pSql, int64_t st, int64_t et) {
assert(pSql->cmd.stime <= st && pSql->cmd.etime >= et);
static void updateQueryTimeRange(SQueryInfo* pQueryInfo, int64_t st, int64_t et) {
assert(pQueryInfo->stime <= st && pQueryInfo->etime >= et);
pSql->cmd.stime = st;
pSql->cmd.etime = et;
pQueryInfo->stime = st;
pQueryInfo->etime = et;
}
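/*
 * Illustrative sketch (not part of the original patch, hypothetical names): quitAllSubquery and
 * the retrieve callbacks above count finished subqueries with atomic_add_fetch_32 and let only
 * the caller that observes finished == numOfTotal move on to the next phase. The same
 * last-finisher pattern with pthreads and the gcc/clang __sync builtin (assumed to be what
 * atomic_add_fetch_32 maps to):
 */
#include <pthread.h>
#include <stdio.h>

typedef struct SubqueryState {
  int completed;                 /* updated only through __sync_add_and_fetch */
  int total;
} SubqueryState;

static SubqueryState state = {0, 4};

static void *subquery_done(void *arg) {
  (void)arg;
  int finished = __sync_add_and_fetch(&state.completed, 1);
  if (finished == state.total) {
    /* exactly one thread observes the final count and launches the next phase */
    printf("all %d subqueries finished, launching second phase\n", state.total);
  }
  return NULL;
}

int main() {
  pthread_t t[4];
  for (int i = 0; i < 4; ++i) pthread_create(&t[i], NULL, subquery_done, NULL);
  for (int i = 0; i < 4; ++i) pthread_join(t[i], NULL);
  return 0;
}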
static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
@ -381,8 +419,12 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SSqlObj* pParentSql = pSupporter->pObj;
SSqlObj* pSql = (SSqlObj*)tres;
if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) {
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows,
pSupporter->pState->code);
@ -408,8 +450,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path);
pSupporter->pTSBuf = pBuf;
} else {
assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pQueryInfo->numOfTables == 1); // for subquery, only one metermetaInfo
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex);
tsBufDestory(pBuf);
@ -422,12 +464,19 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
taos_fetch_rows_a(tres, joinRetrieveCallback, param);
} else if (numOfRows == 0) { // no data from this vnode anymore
if (tscProjectionQueryOnMetric(&pParentSql->cmd)) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pSql->cmd.numOfTables == 1);
SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex);
//todo refactor
if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1);
// for projection query, need to try next vnode
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes;
if ((++pMeterMetaInfo->vnodeIndex) < totalVnode) {
tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotal);
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
tscProcessSql(pSql);
@ -435,8 +484,13 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
return;
}
}
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres,
pSupporter->subqueryIndex);
@ -456,8 +510,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscTrace("%p free all sub SqlObj and quit", pParentSql);
doQuitSubquery(pParentSql);
} else {
updateQueryTimeRange(pParentSql, st, et);
tscLaunchSecondSubquery(pParentSql);
updateQueryTimeRange(pParentQueryInfo, st, et);
tscLaunchSecondPhaseSubqueries(pParentSql);
}
}
} else { // failure of sub query
@ -477,10 +531,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
if (numOfRows >= 0) {
pSql->res.numOfTotal += pSql->res.numOfRows;
}
if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pSql->cmd.numOfTables == 1);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && numOfRows == 0) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1);
// for projection query, need to try next vnode if current vnode is exhausted
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
@ -494,11 +548,13 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
return;
}
}
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal);
tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal,
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
tscTrace("%p all %d secondary subquery retrieves completed, global code:%d", tres, numOfTotal,
pParentSql->res.code);
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
@ -507,57 +563,83 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
}
tsem_post(&pParentSql->rspSem);
} else {
tscTrace("%p sub:%p completed, completed:%d, total:%d", pParentSql, tres, finished, numOfTotal);
}
}
}
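/*
 * Illustrative sketch (not part of the original patch, hypothetical names): for non-ordered
 * projection queries the retrieve callback above bumps vnodeIndex and re-issues the query when
 * the current vnode is drained. The control flow, reduced to plain arrays:
 */
#include <stdio.h>

#define NUM_VNODES 3

static int remaining[NUM_VNODES] = {2, 0, 3};   /* pretend row counts left on each vnode */

/* returns the number of rows fetched from the vnode, 0 when it is exhausted */
static int fetch_from_vnode(int vnode) {
  if (remaining[vnode] == 0) return 0;
  remaining[vnode]--;
  return 1;
}

int main() {
  int vnodeIndex = 0, totalRows = 0;
  while (vnodeIndex < NUM_VNODES) {
    int rows = fetch_from_vnode(vnodeIndex);
    if (rows == 0) {
      vnodeIndex++;                             /* current vnode exhausted, try the next one */
      continue;
    }
    totalRows += rows;
  }
  printf("retrieved %d rows from %d vnodes\n", totalRows, NUM_VNODES);
  return 0;
}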
static SJoinSubquerySupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) {
int32_t notInvolved = 0;
SJoinSubquerySupporter* pSupporter = NULL;
SSubqueryState* pState = NULL;
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
notInvolved++;
} else {
pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param;
pState = pSupporter->pState;
}
}
pState->numOfTotal = pSql->numOfSubs;
pState->numOfCompleted = pSql->numOfSubs - numOfFetch;
return pSupporter;
}
void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
int32_t numOfFetch = 0;
assert(pSql->numOfSubs >= 1);
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) { // this subquery does not need to involve in secondary query
continue;
}
SSqlRes *pRes = &pSql->pSubs[i]->res;
SSqlCmd *pCmd = &pSql->pSubs[i]->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
if (tscProjectionQueryOnMetric(pCmd)) {
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes &&
(!tscHasReachLimitation(pSql->pSubs[i]))) {
(!tscHasReachLimitation(pQueryInfo, pRes))) {
numOfFetch++;
}
} else {
if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pSql->pSubs[i]))) {
if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pQueryInfo, pRes))) {
numOfFetch++;
}
}
}
if (numOfFetch <= 0) {
return ;
return;
}
  // TODO multi-vnode retrieve for projection query with limit clause has bugs, since the global limitation is not handled
tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param;
pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed
pSupporter->pState->numOfCompleted = 0;
SJoinSubquerySupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch);
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
if (pSql1 == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql1->res;
SSqlCmd* pCmd1 = &pSql1->cmd;
pSupporter = (SJoinSubquerySupporter*)pSql1->param;
    // wait for all subqueries to complete
pSupporter->pState->numOfTotal = numOfFetch;
assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0);
assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
if (pRes1->row >= pRes1->numOfRows) {
tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1,
@ -576,6 +658,16 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
  // wait for all subqueries to complete
tsem_wait(&pSql->rspSem);
// update the records for each subquery
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql->pSubs[i]->res;
pRes1->numOfTotalInCurrentClause += pRes1->numOfRows;
}
}
// all subqueries have returned, set the result output index
@ -589,26 +681,28 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
return; // the column transfer support struct has been built
}
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t tableIndexOfSub = -1;
for (int32_t j = 0; j < pCmd->numOfTables; ++j) {
SSqlObj* pSub = pSql->pSubs[j];
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSub->cmd, 0);
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j);
if (pMeterMetaInfo->pMeterMeta->uid == pExpr->uid) {
tableIndexOfSub = j;
break;
}
}
assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables);
SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd;
for (int32_t k = 0; k < pSubCmd->exprsInfo.numOfExprs; ++k) {
SSqlExpr* pSubExpr = tscSqlExprGet(pSubCmd, k);
SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
for (int32_t k = 0; k < pSubQueryInfo->exprsInfo.numOfExprs; ++k) {
SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k);
if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) {
pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k};
break;
@ -619,7 +713,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
SSqlObj* pSql = (SSqlObj*)tres;
// SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
// SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
// int32_t idx = pSql->cmd.vnodeIdx;
@ -643,12 +737,13 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// // no qualified result
// }
//
// tscLaunchSecondSubquery(pSql, ts, num);
// tscLaunchSecondPhaseSubqueries(pSql, ts, num);
// } else {
// }
// } else {
if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) {
    if (code != TSDB_CODE_SUCCESS) {  // call joinRetrieveCallback directly and set the error code
joinRetrieveCallback(param, pSql, code);
} else { // first stage query, continue to retrieve data
@ -675,16 +770,20 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
quitAllSubquery(pParentSql, pSupporter);
} else {
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
tscSetupOutputColumnIndex(pParentSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
/**
* if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of
* data instead of returning to its invoker
*/
if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) {
if (pMeterMetaInfo->vnodeIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes);
pSupporter->pState->numOfCompleted = 0; // reset the record value
@ -820,7 +919,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
pTSBuf->f = fopen(pTSBuf->path, "r+");
if (pTSBuf->f == NULL) {
free(pTSBuf);
free(pTSBuf);
return NULL;
}
@ -893,9 +992,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
return pTSBuf;
}
void tsBufDestory(STSBuf* pTSBuf) {
void* tsBufDestory(STSBuf* pTSBuf) {
if (pTSBuf == NULL) {
return;
return NULL;
}
tfree(pTSBuf->assistBuf);
@ -914,6 +1013,7 @@ void tsBufDestory(STSBuf* pTSBuf) {
}
free(pTSBuf);
return NULL;
}
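/*
 * Illustrative sketch (not part of the original patch, hypothetical names): tsBufDestory now
 * returns NULL so a caller can free and clear a handle in a single statement. The general
 * destroy-returns-NULL idiom:
 */
#include <stdlib.h>

typedef struct Buf { char *data; } Buf;

static void *buf_destroy(Buf *b) {
  if (b == NULL) {
    return NULL;
  }
  free(b->data);
  free(b);
  return NULL;                  /* always NULL, so the caller's pointer cannot dangle */
}

int main() {
  Buf *b = calloc(1, sizeof(Buf));
  b = buf_destroy(b);           /* b is freed and reset to NULL in one expression */
  return (b == NULL) ? 0 : 1;
}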
static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) {

View File

@ -77,7 +77,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
 * length((uint64_t) 123456789011) > 12, greater than sizeof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta;
SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->pMeterMeta;
if (pMeta->meterType == TSDB_METER_METRIC || pMeta->meterType == TSDB_METER_OTABLE ||
pMeta->meterType == TSDB_METER_STABLE) {
@ -106,8 +106,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
// one column for each row
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
SMeterMeta * pMeta = pMeterMetaInfo->pMeterMeta;
/*
@ -119,7 +120,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
int32_t numOfRows = pMeta->numOfColumns;
int32_t totalNumOfRows = numOfRows + pMeta->numOfTags;
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
numOfRows = pMeta->numOfColumns + pMeta->numOfTags;
}
@ -127,31 +128,31 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSchema *pSchema = tsGetSchema(pMeta);
for (int32_t i = 0; i < numOfRows; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TSDB_COL_NAME_LEN);
char *type = tDataTypeDesc[pSchema[i].type].aName;
pField = tscFieldInfoGetField(pCmd, 1);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
pField = tscFieldInfoGetField(pQueryInfo, 1);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
int32_t bytes = pSchema[i].bytes;
if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes / TSDB_NCHAR_SIZE;
}
pField = tscFieldInfoGetField(pCmd, 2);
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes;
pField = tscFieldInfoGetField(pQueryInfo, 2);
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
pField = tscFieldInfoGetField(pCmd, 3);
pField = tscFieldInfoGetField(pQueryInfo, 3);
if (i >= pMeta->numOfColumns && pMeta->numOfTags != 0) {
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i, "tag",
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i, "tag",
strlen("tag") + 1);
}
}
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
return 0;
}
@ -159,27 +160,27 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *pTagValue = tsGetTagsValue(pMeta);
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TSDB_COL_NAME_LEN);
// type name
pField = tscFieldInfoGetField(pCmd, 1);
pField = tscFieldInfoGetField(pQueryInfo, 1);
char *type = tDataTypeDesc[pSchema[i].type].aName;
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
// type length
int32_t bytes = pSchema[i].bytes;
pField = tscFieldInfoGetField(pCmd, 2);
pField = tscFieldInfoGetField(pQueryInfo, 2);
if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes / TSDB_NCHAR_SIZE;
}
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes;
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
// tag value
pField = tscFieldInfoGetField(pCmd, 3);
char *target = pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i;
pField = tscFieldInfoGetField(pQueryInfo, 3);
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
if (isNull(pTagValue, pSchema[i].type)) {
sprintf(target, "%s", TSDB_DATA_NULL_STR);
@ -236,25 +237,28 @@ static int32_t tscBuildMeterSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
SSqlCmd *pCmd = &pSql->cmd;
pCmd->numOfCols = numOfCols;
pCmd->order.order = TSQL_SO_ASC;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
pQueryInfo->order.order = TSQL_SO_ASC;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN);
rowLen += TSDB_COL_NAME_LEN;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength);
rowLen += typeColLength;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t));
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t));
rowLen += sizeof(int32_t);
tscFieldInfoSetValue(&pCmd->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength);
rowLen += noteColLength;
return rowLen;
}
static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta != NULL);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
assert(tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta != NULL);
const int32_t NUM_OF_DESCRIBE_TABLE_COLUMNS = 4;
const int32_t TYPE_COLUMN_LENGTH = 16;
@ -267,7 +271,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
int32_t rowLen =
tscBuildMeterSchemaResultFields(pSql, NUM_OF_DESCRIBE_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, note_field_length);
tscFieldInfoCalOffset(&pSql->cmd);
tscFieldInfoCalOffset(pQueryInfo);
return tscSetValueToResObj(pSql, rowLen);
}
@ -277,7 +281,9 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
// only need to reorganize the results in the column format
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta;
SSchema * pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
@ -294,7 +300,7 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
}
int32_t totalNumOfResults = pMetricMeta->numOfMeters;
int32_t rowLen = tscGetResRowLength(pCmd);
int32_t rowLen = tscGetResRowLength(pQueryInfo);
tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen);
@ -304,17 +310,17 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
for (int32_t j = 0; j < pSidList->numOfSids; ++j) {
SMeterSidExtInfo *pSidExt = tscGetMeterSidInfo(pSidList, j);
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SColIndexEx *pColIndex = &tscSqlExprGet(pCmd, k)->colInfo;
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SColIndexEx *pColIndex = &tscSqlExprGet(pQueryInfo, k)->colInfo;
int16_t offsetId = pColIndex->colIdx;
assert((pColIndex->flag & TSDB_COL_TAG) != 0);
char * val = pSidExt->tags + vOffset[offsetId];
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k);
memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, k) * totalNumOfResults + pField->bytes * rowIdx, val,
memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, k) * totalNumOfResults + pField->bytes * rowIdx, val,
(size_t)pField->bytes);
}
rowIdx++;
@ -328,21 +334,23 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SMetricMeta *pMetricMeta = tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMetricMeta *pMetricMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMetricMeta;
  int32_t totalNumOfResults = 1;  // the count function only produces one result
int32_t rowLen = tscGetResRowLength(pCmd);
int32_t rowLen = tscGetResRowLength(pQueryInfo);
tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen);
int32_t rowIdx = 0;
for (int32_t i = 0; i < totalNumOfResults; ++i) {
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->colInfo.colIdx == -1 && pExpr->functionId == TSDB_FUNC_COUNT) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k);
memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, i) * totalNumOfResults + pField->bytes * rowIdx,
memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, i) * totalNumOfResults + pField->bytes * rowIdx,
&pMetricMeta->numOfMeters, sizeof(pMetricMeta->numOfMeters));
} else {
tscError("not support operations");
@ -357,15 +365,17 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) {
static int tscProcessQueryTags(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMeta *pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta;
if (pMeterMeta == NULL || pMeterMeta->numOfTags == 0 || pMeterMeta->numOfColumns == 0) {
strcpy(pCmd->payload, "invalid table");
pSql->res.code = TSDB_CODE_INVALID_TABLE;
return pSql->res.code;
}
SSqlExpr *pExpr = tscSqlExprGet(pCmd, 0);
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0);
if (pExpr->functionId == TSDB_FUNC_COUNT) {
return tscBuildMetricTagSqlFunctionResult(pSql);
} else {
@ -374,7 +384,9 @@ static int tscProcessQueryTags(SSqlObj *pSql) {
}
static void tscProcessCurrentUser(SSqlObj *pSql) {
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN);
}
@ -387,19 +399,24 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
}
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN);
}
static void tscProcessServerVer(SSqlObj *pSql) {
const char* v = pSql->pTscObj->sversion;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion));
}
static void tscProcessClientVer(SSqlObj *pSql) {
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version));
}
@ -417,7 +434,9 @@ static void tscProcessServStatus(SSqlObj *pSql) {
}
}
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2);
}
@ -426,12 +445,16 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
SSqlRes *pRes = &pSql->res;
pCmd->numOfCols = 1;
pCmd->order.order = TSQL_SO_ASC;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->order.order = TSQL_SO_ASC;
tscClearFieldInfo(&pQueryInfo->fieldsInfo);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength);
tscInitResObjForLocalQuery(pSql, 1, valueLength);
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data, val, pField->bytes);
}

View File

@ -19,7 +19,7 @@
#define _XOPEN_SOURCE
#include "os.h"
#include "ihash.h"
#include "hash.h"
#include "tscSecondaryMerge.h"
#include "tscUtil.h"
#include "tschemautil.h"
@ -71,8 +71,6 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
}
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
//char * token; //fang not used
//int tokenlen; //fang not used
int32_t index = 0;
SSQLToken sToken;
int64_t interval;
@ -115,13 +113,12 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
index = 0;
sToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
pTokenEnd += index;
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
index = 0;
valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
pTokenEnd += index;
if (valueToken.n < 2) {
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
@ -129,7 +126,7 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_INVALID_SQL;
}
if (timePrec == TSDB_TIME_PRECISION_MILLI) {
interval /= 1000;
}
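/*
 * Illustrative sketch (not part of the original patch, hypothetical names): relative timestamps
 * such as "now+10s" are parsed into an offset in microseconds and then scaled down when the
 * table uses millisecond precision, as in the hunk above. The conversion on its own:
 */
#include <stdint.h>
#include <stdio.h>

enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1 };

static int64_t apply_relative_offset(int64_t base, int64_t offset_us, int precision) {
  if (precision == PRECISION_MILLI) {
    offset_us /= 1000;                          /* the offset was parsed in microseconds */
  }
  return base + offset_us;
}

int main() {
  /* base timestamp in ms plus "+10s" parsed as 10 * 1000 * 1000 us */
  printf("%lld\n", (long long)apply_relative_offset(1580707200000LL, 10000000LL, PRECISION_MILLI));
  return 0;
}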
@ -152,8 +149,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
int64_t iv;
int32_t numType;
char * endptr = NULL;
errno = 0; // clear the previous existed error information
  errno = 0;  // clear any previously existing error information
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: { // bool
if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
@ -193,7 +190,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
return tscInvalidSQLErrMsg(msg, "tinyint data overflow", pToken->z);
}
*((int8_t *)payload) = (int8_t) iv;
*((int8_t *)payload) = (int8_t)iv;
}
break;
@ -378,7 +375,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
} else {
if (pDataBlocks->tsSource == TSDB_USE_SERVER_TS) {
return -1; // client time/server time can not be mixed
} else if (pDataBlocks->tsSource == -1) {
pDataBlocks->tsSource = TSDB_USE_CLI_TS;
}
@ -393,9 +390,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int16_t timePrec, int32_t *code, char* tmpTokenBuf) {
int32_t index = 0;
//bool isPrevOptr; //fang, never used
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
// bool isPrevOptr; //fang, never used
SSQLToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
@ -403,8 +400,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
int32_t rowSize = 0;
for (int i = 0; i < spd->numOfAssignedCols; ++i) {
// the start position in data block buffer of current value in sql
char * start = payload + spd->elems[i].offset;
int16_t colIndex = spd->elems[i].colIndex;
char * start = payload + spd->elems[i].offset;
int16_t colIndex = spd->elems[i].colIndex;
SSchema *pSchema = schema + colIndex;
rowSize += pSchema->bytes;
@ -417,7 +414,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) {
continue;
}
strcpy(error, "client out of memory");
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return -1;
@ -434,10 +431,10 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
// Remove quotation marks
if (TK_STRING == sToken.type) {
// delete escape character: \\, \', \"
char delim = sToken.z[0];
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
for (int32_t k = 1; k < sToken.n - 1; ++k) {
for (int32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
@ -447,13 +444,13 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
continue;
}
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
@ -475,7 +472,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
char *ptr = payload;
for (int32_t i = 0; i < spd->numOfCols; ++i) {
if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null
if (!spd->hasVal[i]) { // current column do not have any value to insert, set it to null
setNull(ptr, schema[i].type, schema[i].bytes);
}
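/*
 * Illustrative sketch (not part of the original patch, hypothetical names): the quoted-token
 * handling above copies the token into a scratch buffer (tmpTokenBuf), dropping the surrounding
 * quotes and resolving backslash escapes before the value is parsed. A simplified stand-alone
 * version of that idea:
 */
#include <stdio.h>
#include <string.h>

/* copy a quoted token (quotes included) into out, dropping quotes and resolving \X escapes */
static int unquote_token(const char *tok, int len, char *out) {
  char delim = tok[0];                          /* opening quote, ' or " */
  int j = 0;
  for (int k = 1; k < len - 1; ++k) {
    if (tok[k] == '\\' && k + 1 < len - 1) {
      out[j++] = tok[k + 1];                    /* keep the escaped character, drop the backslash */
      ++k;
    } else if (tok[k] != delim) {
      out[j++] = tok[k];
    }
  }
  out[j] = '\0';
  return j;                                     /* length of the unescaped value */
}

int main() {
  char buf[64];
  const char *tok = "'it\\'s ok'";
  unquote_token(tok, (int)strlen(tok), buf);
  printf("%s\n", buf);                          /* prints: it's ok */
  return 0;
}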
@ -500,7 +497,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows,
SParsedDataColInfo *spd, char *error, int32_t *code, char* tmpTokenBuf) {
SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SSQLToken sToken;
@ -534,7 +531,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
@ -593,8 +590,7 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
pDataBlock->pData = tmp;
memset(pDataBlock->pData + pDataBlock->size, 0, pDataBlock->nAllocSize - pDataBlock->size);
} else {
//assert(false);
// do nothing
    // do nothing if allocating more memory failed
pDataBlock->nAllocSize = nAllocSizeOld;
*numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize;
return TSDB_CODE_CLI_OUT_OF_MEMORY;
@ -658,12 +654,13 @@ void sortRemoveDuplicates(STableDataBlocks *dataBuf) {
static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char **str, SParsedDataColInfo *spd,
int32_t *totalNum) {
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name, &dataBuf);
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name,
pMeterMeta, &dataBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -673,9 +670,9 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
if (TSDB_CODE_SUCCESS != ret) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t code = TSDB_CODE_INVALID_SQL;
char* tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
@ -687,7 +684,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
SParamInfo* param = dataBuf->params + i;
SParamInfo *param = dataBuf->params + i;
if (param->idx == -1) {
param->idx = pCmd->numOfParams++;
param->offset -= sizeof(SShellSubmitBlock);
@ -708,16 +705,20 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
return TSDB_CODE_SUCCESS;
}
static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
int32_t index = 0;
SSQLToken sToken;
SSQLToken tableToken;
SSQLToken sToken = {0};
SSQLToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
const int32_t TABLE_INDEX = 0;
const int32_t STABLE_INDEX = 1;
SSqlCmd * pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
char *sql = *sqlstr;
// get the token of specified table
index = 0;
tableToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -754,41 +755,54 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
if (numOfColList == 0 && cstart != NULL) {
return TSDB_CODE_INVALID_SQL;
}
if (sToken.type == TK_USING) { // create table if not exists
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX);
if (sToken.type == TK_USING) { // create table if not exists according to the super table
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
STagData *pTag = (STagData *)pCmd->payload;
memset(pTag, 0, sizeof(STagData));
setMeterID(pSql, &sToken, 0);
/*
* the source super table is moved to the secondary position of the pMeterMetaInfo list
*/
if (pQueryInfo->numOfTables < 2) {
tscAddEmptyMeterMetaInfo(pQueryInfo);
}
strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN);
code = tscGetMeterMeta(pSql, pTag->name, 0);
SMeterMetaInfo *pSTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX);
setMeterID(pSTableMeterMetaInfo, &sToken, pSql);
strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_METER_ID_LEN);
code = tscGetMeterMeta(pSql, pSTableMeterMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (!UTIL_METER_IS_SUPERTABLE(pSTableMeterMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
SSchema *pTagSchema = tsGetTagSchema(pSTableMeterMetaInfo->pMeterMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
SParsedDataColInfo spd = {0};
uint8_t numOfTags = pMeterMetaInfo->pMeterMeta->numOfTags;
SParsedDataColInfo spd = {0};
uint8_t numOfTags = pSTableMeterMetaInfo->pMeterMeta->numOfTags;
spd.numOfCols = numOfTags;
    // if some tag columns are specified
if (sToken.type != TK_LP) {
tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
} else {
/* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
/* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
int16_t offset[TSDB_MAX_COLUMNS] = {0};
for (int32_t t = 1; t < numOfTags; ++t) {
offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
@ -815,14 +829,14 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
for (int32_t t = 0; t < numOfTags; ++t) {
if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
pElem->offset = offset[t];
pElem->offset = offset[t];
pElem->colIndex = t;
if (spd.hasVal[t] == true) {
return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
}
spd.hasVal[t] = true;
spd.hasVal[t] = true;
findColumnIndex = true;
break;
}
@ -841,7 +855,7 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
}
if (sToken.type != TK_TAGS) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
@ -849,9 +863,9 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
uint32_t ignoreTokenTypes = TK_LP;
uint32_t numOfIgnoreToken = 1;
for (int i = 0; i < spd.numOfAssignedCols; ++i) {
char* tagVal = pTag->data + spd.elems[i].offset;
char * tagVal = pTag->data + spd.elems[i].offset;
int16_t colIndex = spd.elems[i].colIndex;
index = 0;
sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
sql += index;
@ -867,13 +881,14 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
sToken.n -= 2;
}
code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, pMeterMetaInfo->pMeterMeta->precision);
code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false,
pSTableMeterMetaInfo->pMeterMeta->precision);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY ||
pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[colIndex].bytes) {
if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) &&
sToken.n > pTagSchema[colIndex].bytes) {
return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z);
}
}
@ -888,35 +903,38 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
    // 2. set the null value for the columns that are not assigned values
if (spd.numOfAssignedCols < spd.numOfCols) {
char *ptr = pTag->data;
for (int32_t i = 0; i < spd.numOfCols; ++i) {
if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null
        if (!spd.hasVal[i]) {  // current tag column does not have any value to insert, set it to null
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
}
ptr += pTagSchema[i].bytes;
}
}
}
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
int32_t ret = setMeterID(pSql, &tableToken, 0);
int32_t ret = setMeterID(pMeterMetaInfo, &tableToken, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
createTable = true;
code = tscGetMeterMetaEx(pSql, pMeterMetaInfo->name, true);
if (TSDB_CODE_ACTION_IN_PROGRESS == code) return code;
code = tscGetMeterMetaEx(pSql, pMeterMetaInfo, true);
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
return code;
}
} else {
if (cstart != NULL) {
sql = cstart;
} else {
sql = sToken.z;
}
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
}
int32_t len = cend - cstart + 1;
@ -941,6 +959,15 @@ int validateTableName(char *tblName, int len) {
return tscValidateName(&token);
}
static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sql);
}
pCmd->dataSourceType = type;
return TSDB_CODE_SUCCESS;
}
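The helper above centralizes the VALUES/FILE mix-up check that used to be duplicated at every branch. A minimal standalone sketch of the same check, with simplified stand-in types and assumed constant values (MiniCmd, miniValidateDataSource, and the numeric values of the DATA_FROM_* constants are illustrative only):
#include <stdint.h>
#include <stdio.h>
#define DATA_FROM_SQL_STRING 1   /* assumed values; the real constants live in the client headers */
#define DATA_FROM_DATA_FILE  2
typedef struct { int8_t dataSourceType; } MiniCmd;   /* stand-in for the dataSourceType field of SSqlCmd */
static int miniValidateDataSource(MiniCmd *pCmd, int8_t type) {
  if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) {
    return -1;                       /* VALUES and FILE must not be mixed in one statement */
  }
  pCmd->dataSourceType = type;
  return 0;
}
int main() {
  MiniCmd cmd = {0};
  printf("%d\n", miniValidateDataSource(&cmd, DATA_FROM_SQL_STRING));  /* 0: first source accepted */
  printf("%d\n", miniValidateDataSource(&cmd, DATA_FROM_SQL_STRING));  /* 0: same source again is fine */
  printf("%d\n", miniValidateDataSource(&cmd, DATA_FROM_DATA_FILE));   /* -1: mixing VALUES and FILE rejected */
  return 0;
}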
/**
* usage: insert into table1 values() () table2 values()()
*
@ -950,48 +977,68 @@ int validateTableName(char *tblName, int len) {
* @param pSql
* @return
*/
int doParserInsertSql(SSqlObj *pSql, char *str) {
int doParseInsertSql(SSqlObj *pSql, char *str) {
SSqlCmd *pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_INVALID_SQL;
int32_t totalNum = 0;
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd);
int32_t totalNum = 0;
int32_t code = TSDB_CODE_SUCCESS;
SMeterMetaInfo *pMeterMetaInfo = NULL;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
assert(pQueryInfo != NULL);
if (pQueryInfo->numOfTables == 0) {
pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo);
} else {
pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
}
if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
return code;
}
ASSERT(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList))
assert(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList))
|| ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)));
if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) {
pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt);
pSql->pTableHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
pSql->cmd.pDataBlocks = tscCreateBlockArrayList();
if (NULL == pSql->pTableHashList || NULL == pSql->cmd.pDataBlocks) {
code = TSDB_CODE_CLI_OUT_OF_MEMORY;
goto _error_clean;
}
} else {
ASSERT((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList));
assert((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList));
str = pSql->asyncTblPos;
}
tscTrace("%p create data block list for submit data:%p, asyncTblPos:%p, pTableHashList:%p", pSql, pSql->cmd.pDataBlocks, pSql->asyncTblPos, pSql->pTableHashList);
while (1) {
int32_t index = 0;
int32_t index = 0;
SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL);
if (sToken.n == 0) { // parse file, do not release the STableDataBlock
if (pCmd->isInsertFromFile == 1) {
// no data in the sql string anymore.
if (sToken.n == 0) {
/*
* if the data comes from a data file, no data has been generated yet. So there is no data to
* merge or submit; save the file path and parse the file in other routines.
*/
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
goto _clean;
}
if (totalNum > 0) {
break;
} else { // no data in current sql string, error
/*
* if no data has been generated while parsing the sql string, an error message is returned.
* Otherwise, create the first submit block and submit it to the virtual node.
*/
if (totalNum == 0) {
code = TSDB_CODE_INVALID_SQL;
goto _error_clean;
} else {
break;
}
}
@ -1003,34 +1050,35 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
//TODO refactor
if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) {
if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
void *fp = pSql->fp;
ptrdiff_t pos = pSql->asyncTblPos - pSql->sqlstr;
if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) {
if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) {
/*
* For async insert, after the metermeta is retrieved from the server, the sql string is not
* re-parsed with the new metermeta, to avoid the overhead of fetching the metermeta again.
* Instead, in the getMeterMetaCallback function, parsing of the sql string resumes from the
* interrupted position.
*/
if (fp != NULL) {
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
tscTrace("async insert and waiting to get meter meta, then continue parse sql from offset: %" PRId64, pos);
return code;
}
// todo add to return
tscError("async insert parse error, code:%d, %s", code, tsError[code]);
pSql->asyncTblPos = NULL;
goto _error_clean; // TODO: should this be _clean or _error_clean in the async flow?
} else {
/*
* for async insert, the data block release operation, i.e. tscDestroyBlockArrayList,
* must be executed before launching another thread to get the metermeta, since
* later operations may manipulate the SSqlObj from another thread in the getMeterMetaCallback function.
*/
goto _error_clean;
}
goto _error_clean; // TODO: should this be _clean or _error_clean in the async flow?
}
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL);
goto _error_clean;
}
@ -1038,8 +1086,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
str += index;
if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z);
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
goto _error_clean;
}
@ -1049,13 +1098,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns);
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 0;
} else {
if (pCmd->isInsertFromFile == 1) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
goto _error_clean;
}
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
/*
@ -1067,13 +1111,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
} else if (sToken.type == TK_FILE) {
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 1;
} else {
if (pCmd->isInsertFromFile == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
goto _error_clean;
}
if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
index = 0;
@ -1097,23 +1136,22 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
wordfree(&full_path);
STableDataBlocks *pDataBlock = NULL;
int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, &pDataBlock);
SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta;
int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name,
pMeterMeta, &pDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock);
strcpy(pDataBlock->filename, fname);
} else if (sToken.type == TK_LP) {
/* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta;
SSchema * pSchema = tsGetSchema(pMeterMeta);
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 0;
} else if (pCmd->isInsertFromFile == 1) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
@ -1194,7 +1232,7 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
if (pCmd->numOfParams > 0) {
goto _clean;
}
// submit to more than one vnode
if (pCmd->pDataBlocks->nSize > 0) {
// merge according to vgid
@ -1207,8 +1245,8 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
// set the index of the next vnode to which data will be sent in the data block array list
pMeterMetaInfo->vnodeIndex = 1;
} else {
@ -1222,14 +1260,16 @@ _error_clean:
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
_clean:
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
pSql->asyncTblPos = NULL;
pCmd->isParseFinish = 1;
return code;
}
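The asyncTblPos/pTableHashList handling above makes the insert parser resumable: when table meta is missing, parsing returns TSDB_CODE_ACTION_IN_PROGRESS after recording the current offset, and the callback later re-enters and continues from that offset. A simplified standalone sketch of this resume pattern (all names here are hypothetical, not the TDengine API):
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
enum { MINI_OK = 0, MINI_IN_PROGRESS = 1 };
typedef struct {
  const char *sql;        /* the full insert statement */
  const char *resumePos;  /* NULL on the first pass, saved offset when parsing is interrupted */
  bool        metaReady;  /* set once the (simulated) metadata callback has fired */
} MiniParser;
static int miniParse(MiniParser *p) {
  const char *cur = (p->resumePos != NULL) ? p->resumePos : p->sql;
  if (!p->metaReady) {
    p->resumePos = cur;          /* remember where to continue after the metadata arrives */
    return MINI_IN_PROGRESS;     /* caller goes off to fetch the table meta asynchronously */
  }
  const char *values = strchr(cur, ' ');
  printf("resumed, parsing: %s\n", values != NULL ? values + 1 : cur);
  p->resumePos = NULL;           /* finished, clear the resume point */
  return MINI_OK;
}
int main() {
  MiniParser p = {.sql = "t1 values(1, 2)", .resumePos = NULL, .metaReady = false};
  if (miniParse(&p) == MINI_IN_PROGRESS) {  /* first pass stops, waiting for metadata */
    p.metaReady = true;                     /* metadata callback fires */
    miniParse(&p);                          /* parsing resumes from the saved offset */
  }
  return 0;
}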
int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) {
int tsParseInsertSql(SSqlObj *pSql) {
if (!pSql->pTscObj->writeAuth) {
return TSDB_CODE_NO_RIGHTS;
}
@ -1237,37 +1277,36 @@ int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) {
int32_t index = 0;
SSqlCmd *pCmd = &pSql->cmd;
SSQLToken sToken = tStrGetToken(sql, &index, false, 0, NULL);
SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
pCmd->import = (sToken.type == TK_IMPORT);
sToken = tStrGetToken(sql, &index, false, 0, NULL);
pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
SQueryInfo *pQueryInfo = NULL;
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
uint16_t type = (sToken.type == TK_INSERT)? TSDB_QUERY_TYPE_INSERT:TSDB_QUERY_TYPE_IMPORT;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, type);
sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
if (sToken.type != TK_INTO) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
}
pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
pCmd->isInsertFromFile = -1;
pSql->res.numOfRows = 0;
return doParserInsertSql(pSql, sql + index);
return doParseInsertSql(pSql, pSql->sqlstr + index);
}
int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) {
int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion) {
int32_t ret = TSDB_CODE_SUCCESS;
// must be done before cleaning the sqlcmd object
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
if (NULL == pSql->asyncTblPos) {
tscCleanSqlCmd(&pSql->cmd);
} else {
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
}
if (tscIsInsertOrImportData(pSql->sqlstr)) {
/*
* only for async multi-vnode insertion
@ -1282,11 +1321,11 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) {
pSql->fp = tscAsyncInsertMultiVnodesProxy;
}
ret = tsParseInsertSql(pSql, pSql->sqlstr, acct, db);
ret = tsParseInsertSql(pSql);
} else {
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) return ret;
SSqlInfo SQLInfo = {0};
tSQLParse(&SQLInfo, pSql->sqlstr);
@ -1309,7 +1348,8 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd *pCmd = &pSql->cmd;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
assert(pCmd->numOfClause == 1);
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta;
SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pTableDataBlocks->pData);
tsSetBlockInfo(pBlocks, pMeterMeta, numOfRows);
@ -1341,43 +1381,46 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
int numOfRows = 0;
int32_t code = 0;
int nrows = 0;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
assert(pCmd->numOfClause == 1);
int32_t rowSize = pMeterMeta->rowSize;
pCmd->pDataBlocks = tscCreateBlockArrayList();
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, &pTableDataBlock);
int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, pMeterMeta, &pTableDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
return -1;
}
tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock);
code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows);
if (TSDB_CODE_SUCCESS != code) return -1;
int count = 0;
SParsedDataColInfo spd = {.numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns};
SSchema * pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
SParsedDataColInfo spd = {.numOfCols = pMeterMeta->numOfColumns};
SSchema * pSchema = tsGetSchema(pMeterMeta);
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns);
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMeta->numOfColumns);
while ((readLen = getline(&line, &n, fp)) != -1) {
// line[--readLen] = '\0';
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0;
if (readLen == 0) continue; //fang, <= to ==
if (readLen == 0) continue; // fang, <= to ==
char *lineptr = line;
strtolower(line, line);
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code;
return (-code);
}
pTableDataBlock->size += len;
count++;
@ -1431,11 +1474,13 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
}
STableDataBlocks *pDataBlock = NULL;
SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1);
int32_t code = TSDB_CODE_SUCCESS;
/* the first block has been sent to server in processSQL function */
assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL);
assert(pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL);
if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) {
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
@ -1447,7 +1492,8 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
}
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize);
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex,
pDataBlocks->nSize);
continue;
}
@ -1460,17 +1506,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
}
// multi-vnode insertion in the sync query model
void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
}
SMeterMetaInfo * pInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
STableDataBlocks *pDataBlock = NULL;
int32_t affected_rows = 0;
assert(pCmd->isInsertFromFile == 1 && pCmd->pDataBlocks != NULL);
assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && pCmd->pDataBlocks != NULL);
SDataBlockList *pDataBlockList = pCmd->pDataBlocks;
pCmd->pDataBlocks = NULL;
@ -1481,7 +1529,7 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
if (pDataBlock == NULL) {
continue;
}
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) {
tscError("%p failed to malloc when insert file", pSql);
continue;
@ -1496,16 +1544,16 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
continue;
}
strncpy(pInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN);
strncpy(pMeterMetaInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN);
memset(pDataBlock->pData, 0, pDataBlock->nAllocSize);
int32_t ret = tscGetMeterMeta(pSql, pInfo->name, 0);
int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p get meter meta failed, abort", pSql);
continue;
}
char* tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
char *tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
tscError("%p calloc failed", pSql);
continue;
@ -1513,7 +1561,7 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf);
free(tmpTokenBuf);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (nrows < 0) {

View File

@ -22,7 +22,7 @@
#include "tstrbuild.h"
int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db);
int tsParseInsertSql(SSqlObj *pSql);
int taos_query_imp(STscObj* pObj, SSqlObj* pSql);
////////////////////////////////////////////////////////////////////////////////
@ -385,12 +385,11 @@ static int insertStmtAddBatch(STscStmt* stmt) {
}
static int insertStmtPrepare(STscStmt* stmt) {
STscObj* taos = stmt->taos;
SSqlObj *pSql = stmt->pSql;
pSql->cmd.numOfParams = 0;
pSql->cmd.batchSize = 0;
return tsParseInsertSql(pSql, pSql->sqlstr, taos->acctId, taos->db);
return tsParseInsertSql(pSql);
}
static int insertStmtReset(STscStmt* pStmt) {
@ -409,7 +408,7 @@ static int insertStmtReset(STscStmt* pStmt) {
}
pCmd->batchSize = 0;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
pMeterMetaInfo->vnodeIndex = 0;
return TSDB_CODE_SUCCESS;
}
@ -423,7 +422,8 @@ static int insertStmtExecute(STscStmt* stmt) {
++pCmd->batchSize;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1);
if (pCmd->pDataBlocks->nSize > 0) {
// merge according to vgid
@ -448,6 +448,8 @@ static int insertStmtExecute(STscStmt* stmt) {
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
pRes->qhandle = 0;
pSql->thandle = NULL;

File diff suppressed because it is too large

View File

@ -24,7 +24,7 @@
int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
void *pParser = ParseAlloc(malloc);
pSQLInfo->validSql = true;
pSQLInfo->valid = true;
int32_t i = 0;
while (1) {
@ -50,12 +50,12 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
}
case TK_ILLEGAL: {
snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z);
pSQLInfo->validSql = false;
pSQLInfo->valid = false;
goto abort_parse;
}
default:
Parse(pParser, t0.type, t0, pSQLInfo);
if (pSQLInfo->validSql == false) {
if (pSQLInfo->valid == false) {
goto abort_parse;
}
}
@ -554,58 +554,64 @@ tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExp
return pList;
}
void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList) {
SInsertSQL *pInsert = calloc(1, sizeof(SInsertSQL));
pInsert->name = *pName;
pInsert->pValue = pList;
pInfo->pInsertInfo = pInsert;
pInfo->sqlType = TSQL_INSERT;
void doDestroyQuerySql(SQuerySQL *pQuerySql) {
if (pQuerySql == NULL) {
return;
}
tSQLExprListDestroy(pQuerySql->pSelection);
pQuerySql->pSelection = NULL;
tSQLExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
tVariantListDestroy(pQuerySql->pSortOrder);
pQuerySql->pSortOrder = NULL;
tVariantListDestroy(pQuerySql->pGroupby);
pQuerySql->pGroupby = NULL;
tVariantListDestroy(pQuerySql->from);
pQuerySql->from = NULL;
tVariantListDestroy(pQuerySql->fillType);
free(pQuerySql);
}
void destroyQuerySql(SQuerySQL *pSql) {
if (pSql == NULL) return;
void destroyAllSelectClause(SSubclauseInfo *pClause) {
if (pClause == NULL || pClause->numOfClause == 0) {
return;
}
tSQLExprListDestroy(pSql->pSelection);
pSql->pSelection = NULL;
tSQLExprDestroy(pSql->pWhere);
pSql->pWhere = NULL;
tVariantListDestroy(pSql->pSortOrder);
pSql->pSortOrder = NULL;
tVariantListDestroy(pSql->pGroupby);
pSql->pGroupby = NULL;
tVariantListDestroy(pSql->from);
pSql->from = NULL;
tVariantListDestroy(pSql->fillType);
free(pSql);
for(int32_t i = 0; i < pClause->numOfClause; ++i) {
SQuerySQL *pQuerySql = pClause->pClause[i];
doDestroyQuerySql(pQuerySql);
}
tfree(pClause->pClause);
}
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName,
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pStableName,
tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) {
SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL));
switch (type) {
case TSQL_CREATE_NORMAL_METER: {
case TSQL_CREATE_TABLE: {
pCreate->colInfo.pColumns = pCols;
assert(pTagVals == NULL && pTags == NULL);
break;
}
case TSQL_CREATE_NORMAL_METRIC: {
case TSQL_CREATE_STABLE: {
pCreate->colInfo.pColumns = pCols;
pCreate->colInfo.pTagColumns = pTags;
assert(pTagVals == NULL && pTags != NULL && pCols != NULL);
break;
}
case TSQL_CREATE_METER_FROM_METRIC: {
case TSQL_CREATE_TABLE_FROM_STABLE: {
pCreate->usingInfo.pTagVals = pTagVals;
pCreate->usingInfo.metricName = *pMetricName;
pCreate->usingInfo.stableName = *pStableName;
break;
}
case TSQL_CREATE_STREAM: {
@ -616,19 +622,24 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLTo
assert(false);
}
pCreate->type = type;
return pCreate;
}
SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) {
SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL));
pAlterTable->name = *pMeterName;
pAlterTable->type = type;
if (type == ALTER_TABLE_ADD_COLUMN || type == ALTER_TABLE_TAGS_ADD) {
if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
pAlterTable->pAddColumns = pCols;
assert(pVals == NULL);
} else {
/* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
* ALTER_TABLE_DROP_COLUMN */
/*
* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
* ALTER_TABLE_DROP_COLUMN
*/
pAlterTable->varList = pVals;
assert(pCols == NULL);
}
@ -639,27 +650,28 @@ SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tV
void SQLInfoDestroy(SSqlInfo *pInfo) {
if (pInfo == NULL) return;
if (pInfo->sqlType == TSQL_QUERY_METER) {
destroyQuerySql(pInfo->pQueryInfo);
} else if (pInfo->sqlType >= TSQL_CREATE_NORMAL_METER && pInfo->sqlType <= TSQL_CREATE_STREAM) {
if (pInfo->type == TSDB_SQL_SELECT) {
destroyAllSelectClause(&pInfo->subclauseInfo);
} else if (pInfo->type == TSDB_SQL_CREATE_TABLE) {
SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo;
destroyQuerySql(pCreateTableInfo->pSelect);
doDestroyQuerySql(pCreateTableInfo->pSelect);
tFieldListDestroy(pCreateTableInfo->colInfo.pColumns);
tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns);
tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals);
tfree(pInfo->pCreateTableInfo);
} else if (pInfo->sqlType >= ALTER_TABLE_TAGS_ADD && pInfo->sqlType <= ALTER_TABLE_DROP_COLUMN) {
} else if (pInfo->type == TSDB_SQL_ALTER_TABLE) {
tVariantListDestroy(pInfo->pAlterInfo->varList);
tFieldListDestroy(pInfo->pAlterInfo->pAddColumns);
tfree(pInfo->pAlterInfo);
} else {
if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) {
free(pInfo->pDCLInfo->a);
}
if (pInfo->sqlType == CREATE_DATABASE) {
if (pInfo->type == TSDB_SQL_CREATE_DB) {
tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep);
}
@ -667,13 +679,52 @@ void SQLInfoDestroy(SSqlInfo *pInfo) {
}
}
void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
pInfo->sqlType = type;
pInfo->pCreateTableInfo = pSqlExprInfo;
SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) {
if (pSubclause == NULL) {
pSubclause = calloc(1, sizeof(SSubclauseInfo));
}
int32_t newSize = pSubclause->numOfClause + 1;
char* tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES);
if (tmp == NULL) {
return pSubclause;
}
pSubclause->pClause = (SQuerySQL**) tmp;
pSubclause->pClause[newSize - 1] = pSqlExprInfo;
pSubclause->numOfClause++;
return pSubclause;
}
SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
pInfo->type = type;
if (type == TSDB_SQL_SELECT) {
pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo;
free(pSqlExprInfo);
} else {
pInfo->pCreateTableInfo = pSqlExprInfo;
}
if (pMeterName != NULL) {
pInfo->pCreateTableInfo->name = *pMeterName;
}
return pInfo;
}
SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) {
char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES);
if (tmp == NULL) { // out of memory
return pQueryInfo;
}
pQueryInfo->pClause = (SQuerySQL**) tmp;
pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause;
return pQueryInfo;
}
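Both setSubclause and appendSelectClause grow the pClause pointer array one slot at a time with realloc and fall back to the unchanged list when the reallocation fails. A minimal standalone sketch of that growth pattern (illustrative names only):
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
typedef struct {
  void  **pClause;      /* growable array of clause pointers */
  int32_t numOfClause;
} MiniSubclauseList;
/* append one clause; on realloc failure the existing array is left untouched */
static int miniAppendClause(MiniSubclauseList *pList, void *pClause) {
  void **tmp = realloc(pList->pClause, (pList->numOfClause + 1) * sizeof(void *));
  if (tmp == NULL) {
    return -1;          /* out of memory: keep the old buffer and count */
  }
  pList->pClause = tmp;
  pList->pClause[pList->numOfClause++] = pClause;
  return 0;
}
int main() {
  MiniSubclauseList list = {0};
  int a = 1, b = 2;     /* stand-ins for parsed SQuerySQL objects */
  miniAppendClause(&list, &a);
  miniAppendClause(&list, &b);
  printf("numOfClause: %d\n", list.numOfClause);  /* prints 2 */
  free(list.pClause);
  return 0;
}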
void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists) {
@ -703,7 +754,7 @@ tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken) {
}
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
pInfo->sqlType = type;
pInfo->type = type;
if (nParam == 0) return;
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL));
@ -718,8 +769,42 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
va_end(va);
}
void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
tTokenListAppend(pInfo->pDCLInfo, pToken);
pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1);
}
void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns) {
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
pInfo->type = TSDB_SQL_SHOW;
SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
pShowInfo->showType = type;
if (prefix != NULL && prefix->type != 0) {
pShowInfo->prefix = *prefix;
} else {
pShowInfo->prefix.type = 0;
}
if (pPatterns != NULL && pPatterns->type != 0) {
pShowInfo->pattern = *pPatterns;
} else {
pShowInfo->pattern.type = 0;
}
}
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists) {
pInfo->sqlType = type;
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
@ -731,20 +816,74 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBI
}
void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) {
pInfo->sqlType = type;
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
pInfo->pDCLInfo->acctOpt = *pAcctInfo;
tTokenListAppend(pInfo->pDCLInfo, pName);
if (pPwd->n > 0) {
tTokenListAppend(pInfo->pDCLInfo, pPwd);
assert(pName != NULL);
pInfo->pDCLInfo->user.user = *pName;
if (pPwd != NULL) {
pInfo->pDCLInfo->user.passwd = *pPwd;
// pInfo->pDCLInfo->user.hasPasswd = true;
} else {
// pInfo->pDCLInfo->user.hasPasswd = false;
}
}
void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd) {
pInfo->type = TSDB_SQL_CREATE_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(pName != NULL && pPasswd != NULL);
pInfo->pDCLInfo->user.user = *pName;
pInfo->pDCLInfo->user.passwd = *pPasswd;
}
void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege) {
pInfo->type = TSDB_SQL_ALTER_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(pName != NULL);
SUserInfo* pUser = &pInfo->pDCLInfo->user;
pUser->type = type;
pUser->user = *pName;
if (pPwd != NULL) {
pUser->passwd = *pPwd;
// pUser->hasPasswd = true;
} else {
pUser->passwd.type = TSDB_DATA_TYPE_NULL;
}
if (pPrivilege != NULL) {
pUser->privilege = *pPrivilege;
// pUser->hasPrivilege = true;
} else {
pUser->privilege.type = TSDB_DATA_TYPE_NULL;
}
}
void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(ip != NULL);
pInfo->pDCLInfo->ip = *ip;
}
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
pDBInfo->numOfBlocksPerTable = 50;
pDBInfo->compressionLevel = -1;

View File

@ -58,12 +58,13 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
* the fields and offset attributes in pCmd and pModel may be different due to
* the merge requirement. So the final result in the pRes structure is formatted in accordance with the pCmd object.
*/
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pReducer->resColModel->maxCapacity;
pCtx->order = pCmd->order.order;
pCtx->functionId = pCmd->exprsInfo.pExprs[i].functionId;
pCtx->aOutputBuf = pReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pReducer->resColModel->maxCapacity;
pCtx->order = pQueryInfo->order.order;
pCtx->functionId = pQueryInfo->exprsInfo.pExprs[i].functionId;
// the input buffer holds only one data point
pCtx->aInputElemBuf = pReducer->pTempBuffer->data + pDesc->pSchema->colOffset[i];
@ -72,7 +73,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
pCtx->inputType = pDesc->pSchema->pFields[i].type;
pCtx->inputBytes = pDesc->pSchema->pFields[i].bytes;
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
// the output data format still comes from pCmd.
pCtx->outputBytes = pField->bytes;
pCtx->outputType = pField->type;
@ -84,15 +85,15 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
pRes->bytes[i] = pField->bytes;
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
// for the top/bottom functions, the timestamp output is the first column
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
pCtx->param[2].i64Key = pCmd->order.order;
pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pCmd->order.orderColId;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
}
SResultInfo *pResInfo = &pReducer->pResInfo[i];
@ -105,11 +106,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SSqlRes *pRes, SLocalReducer *pRedu
int16_t n = 0;
int16_t tagLen = 0;
SQLFunctionCtx** pTagCtx = calloc(pCmd->fieldsInfo.numOfOutputCols, POINTER_BYTES);
SQLFunctionCtx** pTagCtx = calloc(pQueryInfo->fieldsInfo.numOfOutputCols, POINTER_BYTES);
SQLFunctionCtx* pCtx = NULL;
for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_TAG_DUMMY || pExpr->functionId == TSDB_FUNC_TS_DUMMY) {
tagLen += pExpr->resBytes;
pTagCtx[n++] = &pReducer->pCtx[i];
@ -214,7 +215,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
#ifdef _DEBUG_VIEW
printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems);
SSrcColumnInfo colInfo[256] = {0};
tscGetSrcColumnInfo(colInfo, pCmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tscGetSrcColumnInfo(colInfo, pQueryInfo);
tColModelDisplayEx(pDesc->pSchema, pDS->filePage.data, pDS->filePage.numOfElems, pMemBuffer[0]->numOfElemsPerPage,
colInfo);
@ -238,7 +241,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
param->pLocalData = pReducer->pLocalDataSrc;
param->pDesc = pReducer->pDesc;
param->numOfElems = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
param->groupOrderType = pCmd->groupbyExpr.orderType;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
@ -247,12 +252,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// the input data follows the old format, but the output is produced in a new format.
// so, all the input must be parsed as the old format
pReducer->pCtx = (SQLFunctionCtx *)calloc(pCmd->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx));
pReducer->pCtx = (SQLFunctionCtx *)calloc(pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SQLFunctionCtx));
pReducer->rowSize = pMemBuffer[0]->nElemSize;
tscRestoreSQLFunctionForMetricQuery(pCmd);
tscFieldInfoCalOffset(pCmd);
tscRestoreSQLFunctionForMetricQuery(pQueryInfo);
tscFieldInfoCalOffset(pQueryInfo);
if (pReducer->rowSize > pMemBuffer[0]->nPageSize) {
assert(false); // todo fixed row size is larger than the minimum page size;
@ -272,7 +277,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->nResultBufSize = pMemBuffer[0]->nPageSize * 16;
pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
int32_t finalRowLength = tscGetResRowLength(pCmd);
int32_t finalRowLength = tscGetResRowLength(pQueryInfo);
pReducer->resColModel = finalmodel;
pReducer->resColModel->maxCapacity = pReducer->nResultBufSize / finalRowLength;
assert(finalRowLength <= pReducer->rowSize);
@ -294,33 +299,40 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
}
pReducer->pTempBuffer->numOfElems = 0;
pReducer->pResInfo = calloc((size_t)pCmd->fieldsInfo.numOfOutputCols, sizeof(SResultInfo));
pReducer->pResInfo = calloc((size_t)pQueryInfo->fieldsInfo.numOfOutputCols, sizeof(SResultInfo));
tscCreateResPointerInfo(pCmd, pRes);
tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pRes, pReducer, pDesc);
// we change the maxCapacity of schema to denote that there is only one row in temp buffer
pReducer->pDesc->pSchema->maxCapacity = 1;
pReducer->offset = pCmd->limit.offset;
//restore the limitation value at the last stage
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
pQueryInfo->limit.limit = pQueryInfo->clauseLimit;
pQueryInfo->limit.offset = pQueryInfo->prjOffset;
}
pReducer->offset = pQueryInfo->limit.offset;
pRes->pLocalReducer = pReducer;
pRes->numOfGroups = 0;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
int16_t prec = pMeterMetaInfo->pMeterMeta->precision;
int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime;
int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec);
int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime;
int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec);
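  /*
   * Illustrative arithmetic (simplified; the actual helper also honors the interval time unit
   * and the timestamp precision): aligning the start key down to the interval boundary means
   *   revisedSTime = stime - (stime % nAggTimeInterval)
   * e.g. stime = 1000123 ms with nAggTimeInterval = 60000 ms gives 1000123 - 40123 = 960000 ms,
   * i.e. interpolation starts at the beginning of the enclosing one-minute window.
   */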
SInterpolationInfo *pInterpoInfo = &pReducer->interpolationInfo;
taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols,
taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
pReducer->rowSize);
int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols;
int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols;
if (pCmd->groupbyExpr.numOfGroupCols > 0) {
pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pCmd->groupbyExpr.numOfGroupCols;
for (int32_t i = 1; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
pInterpoInfo->pTags[0] = (char *)pInterpoInfo->pTags + POINTER_BYTES * pQueryInfo->groupbyExpr.numOfGroupCols;
for (int32_t i = 1; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
pInterpoInfo->pTags[i] = pReducer->resColModel->pFields[startIndex + i - 1].bytes + pInterpoInfo->pTags[i - 1];
}
} else {
@ -429,7 +441,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
}
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// there is no more result, so we release all allocated resources
SLocalReducer *pLocalReducer = (SLocalReducer*)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
if (pLocalReducer != NULL) {
@ -443,15 +456,15 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
taosDestoryInterpoInfo(&pLocalReducer->interpolationInfo);
if (pLocalReducer->pCtx != NULL) {
for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
for(int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag);
tVariantDestroy(&pCtx->tag);
if (pCtx->tagInfo.pTagCtxList != NULL) {
tfree(pCtx->tagInfo.pTagCtxList);
}
}
tfree(pLocalReducer->pCtx);
}
@ -462,7 +475,7 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tfree(pLocalReducer->pResultBuf);
if (pLocalReducer->pResInfo != NULL) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
tfree(pLocalReducer->pResInfo[i].interResultBuf);
}
@ -497,12 +510,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCmd, tColModel *pModel) {
int32_t numOfGroupByCols = 0;
if (pCmd->groupbyExpr.numOfGroupCols > 0) {
numOfGroupByCols = pCmd->groupbyExpr.numOfGroupCols;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
numOfGroupByCols = pQueryInfo->groupbyExpr.numOfGroupCols;
}
// primary timestamp column is involved in final result
if (pCmd->nAggTimeInterval != 0) {
if (pQueryInfo->nAggTimeInterval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
@ -512,20 +527,20 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
if (numOfGroupByCols > 0) {
int32_t startCols = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols;
int32_t startCols = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols;
// tag values are located in the last columns
for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
orderIdx[i] = startCols++;
}
if (pCmd->nAggTimeInterval != 0) {
if (pQueryInfo->nAggTimeInterval != 0) {
// the first column is the timestamp; this handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
}
*pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pCmd->order.order);
*pOrderDesc = tOrderDesCreate(orderIdx, numOfGroupByCols, pModel, pQueryInfo->order.order);
tfree(orderIdx);
if (*pOrderDesc == NULL) {
@ -536,9 +551,17 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
// disable merge procedure for column projection query
assert(functionId != TSDB_FUNC_ARITHM);
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
return true;
}
if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) {
return false;
}
@ -553,10 +576,10 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { //<= 0
// super table interval query
assert(pCmd->nAggTimeInterval > 0);
assert(pQueryInfo->nAggTimeInterval > 0);
pOrderDesc->orderIdx.numOfOrderedCols -= 1;
} else { // simple group by query
assert(pCmd->nAggTimeInterval == 0);
assert(pQueryInfo->nAggTimeInterval == 0);
}
// only one row exists
@ -575,7 +598,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
tColModel *pModel = NULL;
*pFinalModel = NULL;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
(*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pMeterMetaInfo->pMetricMeta->numOfVnodes);
if (*pMemBuffer == NULL) {
@ -584,7 +608,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
return pRes->code;
}
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols);
pSchema = (SSchema *)calloc(1, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols);
if (pSchema == NULL) {
tscError("%p failed to allocate memory", pSql);
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
@ -592,8 +616,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
}
int32_t rlen = 0;
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
pSchema[i].bytes = pExpr->resBytes;
pSchema[i].type = pExpr->resType;
@ -602,9 +626,11 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
}
int32_t capacity = 0;
if (0 != rlen) capacity = nBufferSizes / rlen;
if (rlen != 0) {
capacity = nBufferSizes / rlen;
}
pModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity);
pModel = tColModelCreate(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity);
for (int32_t i = 0; i < pMeterMetaInfo->pMetricMeta->numOfVnodes; ++i) {
char tmpPath[512] = {0};
@ -620,16 +646,16 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
return pRes->code;
}
memset(pSchema, 0, sizeof(SSchema) * pCmd->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
memset(pSchema, 0, sizeof(SSchema) * pQueryInfo->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
pSchema[i].type = pField->type;
pSchema[i].bytes = pField->bytes;
strcpy(pSchema[i].name, pField->name);
}
*pFinalModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity);
*pFinalModel = tColModelCreate(pSchema, pQueryInfo->fieldsInfo.numOfOutputCols, capacity);
tfree(pSchema);
return TSDB_CODE_SUCCESS;
@ -719,15 +745,15 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pCmd, SInterpolationInfo *pInterpoInfo) {
void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo, SInterpolationInfo *pInterpoInfo) {
// discard the following dataset in the same group and reset the interpolation information
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
int16_t prec = pMeterMetaInfo->pMeterMeta->precision;
int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime;
int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, prec);
int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime;
int64_t revisedSTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, prec);
taosInitInterpoInfo(pInterpoInfo, pCmd->order.order, revisedSTime, pCmd->groupbyExpr.numOfGroupCols,
taosInitInterpoInfo(pInterpoInfo, pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
pLocalReducer->rowSize);
pLocalReducer->discard = true;
@ -738,11 +764,12 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SSqlCmd *pC
}
// todo merge with following function
static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage *pFinalDataPage) {
for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
static void reversedCopyResultToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage *pFinalDataPage) {
for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
int32_t offset = tscFieldInfoGetOffset(pCmd, i);
int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
char * src = pFinalDataPage->data + (pRes->numOfRows - 1) * pField->bytes + pRes->numOfRows * offset;
char * dst = pRes->data + pRes->numOfRows * offset;
@ -754,12 +781,11 @@ static void reversedCopyResultToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage *
}
}
static void reversedCopyFromInterpolationToDstBuf(SSqlCmd *pCmd, SSqlRes *pRes, tFilePage **pResPages,
SLocalReducer *pLocalReducer) {
for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
static void reversedCopyFromInterpolationToDstBuf(SQueryInfo* pQueryInfo, SSqlRes *pRes, tFilePage **pResPages, SLocalReducer *pLocalReducer) {
for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
int32_t offset = tscFieldInfoGetOffset(pCmd, i);
int32_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
assert(offset == pLocalReducer->resColModel->colOffset[i]);
char *src = pResPages[i]->data + (pRes->numOfRows - 1) * pField->bytes;
@ -781,7 +807,8 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
tFilePage *pFinalDataPage = pLocalReducer->pResultBuf;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pRes->pLocalReducer != pLocalReducer) {
/*
* The release of the SSqlObj has been called, and it is in the destroying function invoked by another thread.
@ -791,111 +818,112 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
assert(pRes->pLocalReducer == NULL);
}
if (pCmd->nAggTimeInterval == 0 || pCmd->interpoType == TSDB_INTERPO_NONE) {
if (pQueryInfo->nAggTimeInterval == 0 || pQueryInfo->interpoType == TSDB_INTERPO_NONE) {
// no interval query, no interpolation
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = pFinalDataPage->numOfElems;
pRes->numOfTotal += pRes->numOfRows;
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
if (pCmd->limit.offset > 0) {
if (pCmd->limit.offset < pRes->numOfRows) {
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = pFinalDataPage->numOfElems;
tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pCmd->limit.offset - 1);
tColModelErase(pLocalReducer->resColModel, pFinalDataPage, prevSize, 0, pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize);
pRes->numOfRows -= pCmd->limit.offset;
pRes->numOfTotal -= pCmd->limit.offset;
pCmd->limit.offset = 0;
pRes->numOfRows -= pQueryInfo->limit.offset;
pRes->numOfTotalInCurrentClause -= pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
} else {
pCmd->limit.offset -= pRes->numOfRows;
pQueryInfo->limit.offset -= pRes->numOfRows;
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
}
}
if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) {
if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) {
/* impose the limitation of output rows on the final result */
int32_t prevSize = pFinalDataPage->numOfElems;
int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit;
int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit;
assert(overFlow < pRes->numOfRows);
pRes->numOfTotal = pCmd->limit.limit;
pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit;
pRes->numOfRows -= overFlow;
pFinalDataPage->numOfElems -= overFlow;
tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize);
/* set the remaining data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, &pLocalReducer->interpolationInfo);
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, &pLocalReducer->interpolationInfo);
}
int32_t rowSize = tscGetResRowLength(pCmd);
int32_t rowSize = tscGetResRowLength(pQueryInfo);
// handle the descending-order output
if (pCmd->order.order == TSQL_SO_ASC) {
// if (pQueryInfo->order.order == TSQL_SO_ASC) {
memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize);
} else {
reversedCopyResultToDstBuf(pCmd, pRes, pFinalDataPage);
}
// } else {
// reversedCopyResultToDstBuf(pQueryInfo, pRes, pFinalDataPage);
// }
pFinalDataPage->numOfElems = 0;
return;
}
int64_t * pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo;
int64_t *pPrimaryKeys = (int64_t *)pLocalReducer->pBufForInterpo;
SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo;
int64_t actualETime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime;
int64_t actualETime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->etime : pQueryInfo->stime;
tFilePage **pResPages = malloc(POINTER_BYTES * pCmd->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->maxCapacity);
}
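  /*
   * Illustrative sketch of the allocation above (simplified; not part of the original code):
   * each pResPages[i] uses the header-plus-trailing-buffer idiom, i.e. one allocation holding
   * the tFilePage header followed by maxCapacity values of this column:
   *
   *   size_t sz = sizeof(tFilePage) + (size_t)pField->bytes * maxCapacity;
   *   tFilePage *pg = calloc(1, sz);   // pg->data addresses the trailing column buffer
   *
   * so a single free() later releases both the header and the column data.
   */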
char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pCmd->fieldsInfo.numOfOutputCols);
int32_t *functions = (int32_t *)((char *)srcData + pCmd->fieldsInfo.numOfOutputCols * sizeof(void *));
char ** srcData = (char **)malloc((POINTER_BYTES + sizeof(int32_t)) * pQueryInfo->fieldsInfo.numOfOutputCols);
int32_t *functions = (int32_t *)((char *)srcData + pQueryInfo->fieldsInfo.numOfOutputCols * sizeof(void *));
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pCmd, i) * pInterpoInfo->numOfRawDataInRows;
functions[i] = tscSqlExprGet(pCmd, i)->functionId;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
srcData[i] = pLocalReducer->pBufForInterpo + tscFieldInfoGetOffset(pQueryInfo, i) * pInterpoInfo->numOfRawDataInRows;
functions[i] = tscSqlExprGet(pQueryInfo, i)->functionId;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
int8_t precision = pMeterMetaInfo->pMeterMeta->precision;
while (1) {
int32_t remains = taosNumOfRemainPoints(pInterpoInfo);
TSKEY etime = taosGetRevisedEndKey(actualETime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit,
TSKEY etime = taosGetRevisedEndKey(actualETime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit,
precision);
int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pCmd->nAggTimeInterval, etime,
int32_t nrows = taosGetNumOfResultWithInterpo(pInterpoInfo, pPrimaryKeys, remains, pQueryInfo->nAggTimeInterval, etime,
pLocalReducer->resColModel->maxCapacity);
int32_t newRows = taosDoInterpoResult(pInterpoInfo, pCmd->interpoType, pResPages, remains, nrows,
pCmd->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData,
pCmd->defaultVal, functions, pLocalReducer->resColModel->maxCapacity);
int32_t newRows = taosDoInterpoResult(pInterpoInfo, pQueryInfo->interpoType, pResPages, remains, nrows,
pQueryInfo->nAggTimeInterval, pPrimaryKeys, pLocalReducer->resColModel, srcData,
pQueryInfo->defaultVal, functions, pLocalReducer->resColModel->maxCapacity);
assert(newRows <= nrows);
if (pCmd->limit.offset < newRows) {
newRows -= pCmd->limit.offset;
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
if (pCmd->limit.offset > 0) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pCmd->limit.offset, newRows * pField->bytes);
if (pQueryInfo->limit.offset > 0) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset, newRows * pField->bytes);
}
}
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = newRows;
pRes->numOfTotal += newRows;
pRes->numOfTotalInCurrentClause += newRows;
pCmd->limit.offset = 0;
pQueryInfo->limit.offset = 0;
break;
} else {
pCmd->limit.offset -= newRows;
pQueryInfo->limit.offset -= newRows;
pRes->numOfRows = 0;
int32_t rpoints = taosNumOfRemainPoints(pInterpoInfo);
@ -907,7 +935,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
/* all output for the current group is completed */
int32_t totalRemainRows =
taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pCmd->nAggTimeInterval, actualETime);
taosGetNumOfResWithoutLimit(pInterpoInfo, pPrimaryKeys, rpoints, pQueryInfo->nAggTimeInterval, actualETime);
if (totalRemainRows <= 0) {
break;
}
@ -916,33 +944,33 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
}
if (pRes->numOfRows > 0) {
if (pCmd->limit.limit >= 0 && pRes->numOfTotal > pCmd->limit.limit) {
int32_t overFlow = pRes->numOfTotal - pCmd->limit.limit;
if (pQueryInfo->limit.limit >= 0 && pRes->numOfTotalInCurrentClause > pQueryInfo->limit.limit) {
int32_t overFlow = pRes->numOfTotalInCurrentClause - pQueryInfo->limit.limit;
pRes->numOfRows -= overFlow;
assert(pRes->numOfRows >= 0);
pRes->numOfTotal = pCmd->limit.limit;
pRes->numOfTotalInCurrentClause = pQueryInfo->limit.limit;
pFinalDataPage->numOfElems -= overFlow;
/* set the remaining data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pCmd, pInterpoInfo);
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pInterpoInfo);
}
if (pCmd->order.order == TSQL_SO_ASC) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
if (pQueryInfo->order.order == TSQL_SO_ASC) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
memcpy(pRes->data + pLocalReducer->resColModel->colOffset[i] * pRes->numOfRows, pResPages[i]->data,
pField->bytes * pRes->numOfRows);
}
} else {
reversedCopyFromInterpolationToDstBuf(pCmd, pRes, pResPages, pLocalReducer);
reversedCopyFromInterpolationToDstBuf(pQueryInfo, pRes, pResPages, pLocalReducer);
}
}
pFinalDataPage->numOfElems = 0;
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
tfree(pResPages[i]);
}
tfree(pResPages);
@ -966,8 +994,10 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, bool needInit) {
// the tag columns need to be set before executing any functions
for(int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) {
SSqlExpr * pExpr = tscSqlExprGet(pCmd, j);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for(int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) {
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, j);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
tVariantAssign(&pCtx->param[0], &pExpr->param[0]);
@ -986,8 +1016,8 @@ static void doExecuteSecondaryMerge(SSqlCmd* pCmd, SLocalReducer *pLocalReducer,
}
}
for (int32_t j = 0; j < pCmd->fieldsInfo.numOfOutputCols; ++j) {
int32_t functionId = tscSqlExprGet(pCmd, j)->functionId;
for (int32_t j = 0; j < pQueryInfo->fieldsInfo.numOfOutputCols; ++j) {
int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
@ -1004,11 +1034,11 @@ static void handleUnprocessedRow(SSqlCmd* pCmd, SLocalReducer *pLocalReducer, tF
}
}
static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) {
static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx) {
int64_t maxOutput = 0;
for (int32_t j = 0; j < pCmd->exprsInfo.numOfExprs; ++j) {
int32_t functionId = tscSqlExprGet(pCmd, j)->functionId;
for (int32_t j = 0; j < pQueryInfo->exprsInfo.numOfExprs; ++j) {
int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId;
/*
* the ts, tag, and tagprj functions cannot decide the number of output rows of the current query
@ -1031,10 +1061,10 @@ static int64_t getNumOfResultLocal(SSqlCmd *pCmd, SQLFunctionCtx *pCtx) {
* filled with the same result, which is the tag values specified in the group by clause
*
*/
static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReducer *pLocalReducer) {
static void fillMultiRowsOfTagsVal(SQueryInfo* pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) {
int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, k);
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
if (maxBufSize < pExpr->resBytes && pExpr->functionId == TSDB_FUNC_TAG) {
maxBufSize = pExpr->resBytes;
}
@ -1043,8 +1073,8 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce
assert(maxBufSize >= 0);
char *buf = malloc((size_t) maxBufSize);
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, k);
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
if (pExpr->functionId != TSDB_FUNC_TAG) {
continue;
}
@ -1064,9 +1094,9 @@ static void fillMultiRowsOfTagsVal(SSqlCmd *pCmd, int32_t numOfRes, SLocalReduce
free(buf);
}
int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, k);
int32_t finalizeRes(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) {
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]);
// allow to re-initialize for the next round
@ -1075,10 +1105,10 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
pLocalReducer->hasPrevRow = false;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pCmd, pLocalReducer->pCtx);
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx);
pLocalReducer->pResultBuf->numOfElems += numOfRes;
fillMultiRowsOfTagsVal(pCmd, numOfRes, pLocalReducer);
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer);
return numOfRes;
}
@ -1089,9 +1119,9 @@ int32_t finalizeRes(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
* results generated by simple aggregation functions, we merge them all into one point
* *Exception*: column projection queries require no merge procedure
*/
bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
bool needToMerge(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = tscSqlExprGet(pCmd, 0)->functionId;
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query
ret = 1; // disable merge procedure
@ -1111,24 +1141,25 @@ bool needToMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuff
return (ret == 0);
}
static bool reachGroupResultLimit(SSqlCmd *pCmd, SSqlRes *pRes) {
return (pRes->numOfGroups >= pCmd->slimit.limit && pCmd->slimit.limit >= 0);
static bool reachGroupResultLimit(SQueryInfo* pQueryInfo, SSqlRes *pRes) {
return (pRes->numOfGroups >= pQueryInfo->slimit.limit && pQueryInfo->slimit.limit >= 0);
}
static bool saveGroupResultInfo(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pRes->numOfGroups += 1;
// the output group is limited by the slimit clause
if (reachGroupResultLimit(pCmd, pRes)) {
if (reachGroupResultLimit(pQueryInfo, pRes)) {
return true;
}
// pRes->pGroupRec = realloc(pRes->pGroupRec, pRes->numOfGroups*sizeof(SResRec));
// pRes->pGroupRec[pRes->numOfGroups-1].numOfRows = pRes->numOfRows;
// pRes->pGroupRec[pRes->numOfGroups-1].numOfTotal = pRes->numOfTotal;
// pRes->pGroupRec[pRes->numOfGroups-1].numOfTotalInCurrentClause = pRes->numOfTotalInCurrentClause;
return false;
}
@ -1143,6 +1174,8 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
tColModel *pModel = pLocalReducer->resColModel;
@ -1152,9 +1185,9 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
* ignore the output of the current group since this group is skipped by the user
* We set numOfRows to 0 and discard the possibly remaining results.
*/
if (pCmd->slimit.offset > 0) {
if (pQueryInfo->slimit.offset > 0) {
pRes->numOfRows = 0;
pCmd->slimit.offset -= 1;
pQueryInfo->slimit.offset -= 1;
pLocalReducer->discard = !noMoreCurrentGroupRes;
return false;
}
@ -1168,24 +1201,24 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
#endif
SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo;
int32_t startIndex = pCmd->fieldsInfo.numOfOutputCols - pCmd->groupbyExpr.numOfGroupCols;
int32_t startIndex = pQueryInfo->fieldsInfo.numOfOutputCols - pQueryInfo->groupbyExpr.numOfGroupCols;
for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) {
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
memcpy(pInterpoInfo->pTags[i],
pLocalReducer->pBufForInterpo + pModel->colOffset[startIndex + i] * pResBuf->numOfElems,
pModel->pFields[startIndex + i].bytes);
}
taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pCmd->interpoType);
taosInterpoSetStartInfo(&pLocalReducer->interpolationInfo, pResBuf->numOfElems, pQueryInfo->interpoType);
doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
return true;
}
void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
void resetOutputBuf(SQueryInfo* pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
pLocalReducer->pCtx[i].aOutputBuf =
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pCmd, i) * pLocalReducer->resColModel->maxCapacity;
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->maxCapacity;
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
@ -1194,18 +1227,21 @@ void resetOutputBuf(SSqlCmd *pCmd, SLocalReducer *pLocalReducer) { // reset out
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
// In handling data in other groups, we need to reset the interpolation information for the new group's data
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
pCmd->limit.offset = pLocalReducer->offset;
pRes->numOfTotalInCurrentClause = 0;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->limit.offset = pLocalReducer->offset;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
int16_t precision = pMeterMetaInfo->pMeterMeta->precision;
// for group result interpolation, do not return if no data is generated
if (pCmd->interpoType != TSDB_INTERPO_NONE) {
int64_t stime = (pCmd->stime < pCmd->etime) ? pCmd->stime : pCmd->etime;
int64_t newTime = taosGetIntervalStartTimestamp(stime, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision);
if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) {
int64_t stime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->stime : pQueryInfo->etime;
int64_t newTime = taosGetIntervalStartTimestamp(stime, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision);
taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pCmd->order.order, newTime, pCmd->groupbyExpr.numOfGroupCols,
taosInitInterpoInfo(&pLocalReducer->interpolationInfo, pQueryInfo->order.order, newTime, pQueryInfo->groupbyExpr.numOfGroupCols,
pLocalReducer->rowSize);
}
}
@ -1218,22 +1254,23 @@ static bool doInterpolationForCurrentGroup(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SLocalReducer * pLocalReducer = pRes->pLocalReducer;
SInterpolationInfo *pInterpoInfo = &pLocalReducer->interpolationInfo;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
int8_t p = pMeterMetaInfo->pMeterMeta->precision;
if (taosHasRemainsDataForInterpolation(pInterpoInfo)) {
assert(pCmd->interpoType != TSDB_INTERPO_NONE);
assert(pQueryInfo->interpoType != TSDB_INTERPO_NONE);
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pInterpoInfo->numOfRawDataInRows - 1));
int32_t remain = taosNumOfRemainPoints(pInterpoInfo);
TSKEY ekey = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, p);
TSKEY ekey = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, p);
int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY *)pLocalReducer->pBufForInterpo, remain,
pCmd->nAggTimeInterval, ekey, pLocalReducer->resColModel->maxCapacity);
pQueryInfo->nAggTimeInterval, ekey, pLocalReducer->resColModel->maxCapacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, false);
}
@ -1253,17 +1290,18 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
int8_t precision = pMeterMetaInfo->pMeterMeta->precision;
if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
// if interpoType == TSDB_INTERPO_NONE, return directly
if (pCmd->interpoType != TSDB_INTERPO_NONE) {
int64_t etime = (pCmd->stime < pCmd->etime) ? pCmd->etime : pCmd->stime;
if (pQueryInfo->interpoType != TSDB_INTERPO_NONE) {
int64_t etime = (pQueryInfo->stime < pQueryInfo->etime) ? pQueryInfo->etime : pQueryInfo->stime;
etime = taosGetRevisedEndKey(etime, pCmd->order.order, pCmd->nAggTimeInterval, pCmd->intervalTimeUnit, precision);
int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pCmd->nAggTimeInterval, etime,
etime = taosGetRevisedEndKey(etime, pQueryInfo->order.order, pQueryInfo->nAggTimeInterval, pQueryInfo->intervalTimeUnit, precision);
int32_t rows = taosGetNumOfResultWithInterpo(pInterpoInfo, NULL, 0, pQueryInfo->nAggTimeInterval, etime,
pLocalReducer->resColModel->maxCapacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, true);
@ -1294,10 +1332,12 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, k);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
@ -1311,26 +1351,22 @@ static void doMergeWithPrevRows(SSqlObj *pSql, int32_t numOfRes) {
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
}
int32_t tscLocalDoReduce(SSqlObj *pSql) {
int32_t tscDoLocalreduce(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
tscResetForNextRetrieve(pRes);
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
tscTrace("%s call the drop local reducer", __FUNCTION__);
tscDestroyLocalReducer(pSql);
if (pRes) {
pRes->numOfRows = 0;
pRes->row = 0;
}
return 0;
}
pRes->row = 0;
pRes->numOfRows = 0;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// set the data merge in progress
int32_t prevStatus =
atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS);
@ -1377,7 +1413,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
#if defined(_DEBUG_VIEW)
printf("chosen row:\t");
SSrcColumnInfo colInfo[256] = {0};
tscGetSrcColumnInfo(colInfo, pCmd);
tscGetSrcColumnInfo(colInfo, pQueryInfo);
tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->numOfElems, pModel->maxCapacity, colInfo);
#endif
@ -1413,7 +1449,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
}
if (pLocalReducer->hasPrevRow) {
if (needToMerge(pCmd, pLocalReducer, tmpBuffer)) {
if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) {
// belongs to the group of the previous row, continue processing it
doExecuteSecondaryMerge(pCmd, pLocalReducer, false);
@ -1424,7 +1460,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
* the current row does not belong to the group of the previous row,
* so the processing of the previous group is completed.
*/
int32_t numOfRes = finalizeRes(pCmd, pLocalReducer);
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
@ -1447,7 +1483,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
pLocalReducer->hasUnprocessedRow = true;
}
resetOutputBuf(pCmd, pLocalReducer);
resetOutputBuf(pQueryInfo, pLocalReducer);
pOneDataSrc->rowIdx += 1;
// here we do not check the return value
@ -1501,7 +1537,7 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
}
if (pLocalReducer->hasPrevRow) {
finalizeRes(pCmd, pLocalReducer);
finalizeRes(pQueryInfo, pLocalReducer);
}
if (pLocalReducer->pResultBuf->numOfElems) {

File diff suppressed because it is too large

View File

@ -14,6 +14,7 @@
*/
#include "os.h"
#include "hash.h"
#include "tcache.h"
#include "tlog.h"
#include "tnote.h"
@ -28,7 +29,6 @@
#include "tsocket.h"
#include "ttimer.h"
#include "tutil.h"
#include "ihash.h"
TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) {
@ -126,8 +126,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
}
pSql->cmd.command = TSDB_SQL_CONNECT;
int ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) {
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY;
free(pSql);
free(pObj);
@ -194,15 +193,17 @@ int taos_query_imp(STscObj *pObj, SSqlObj *pSql) {
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
pSql->asyncTblPos = NULL;
if (NULL != pSql->pTableHashList) {
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
}
tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr);
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false);
pRes->code = (uint8_t)tsParseSql(pSql, false);
/*
* set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query.
@ -287,8 +288,12 @@ int taos_num_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) return 0;
SFieldInfo *pFieldsInfo = &pSql->cmd.fieldsInfo;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
return 0;
}
SFieldInfo *pFieldsInfo = &pQueryInfo->fieldsInfo;
return (pFieldsInfo->numOfOutputCols - pFieldsInfo->numOfHiddenCols);
}
@ -310,7 +315,8 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) return 0;
return pSql->cmd.fieldsInfo.pFields;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
return pQueryInfo->fieldsInfo.pFields;
}
int taos_retrieve(TAOS_RES *res) {
@ -337,7 +343,7 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
SSqlRes *pRes = &pSql->res;
STscObj *pObj = pSql->pTscObj;
if (pRes->qhandle == 0) {
if (pRes->qhandle == 0 || pObj->pSql != pSql) {
*rows = NULL;
return 0;
}
@ -356,47 +362,54 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
// secondary merge has handled this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) +
pRes->bytes[i] * (1 - pCmd->order.order) * (pRes->numOfRows - 1);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) +
// pRes->bytes[i] * (1 - pQueryInfo->order.order) * (pRes->numOfRows - 1);
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order);
}
*rows = pRes->tsrow;
return (pCmd->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows;
return (pQueryInfo->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows;
}
static void **doSetResultRowData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);
if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker
tfree(pRes->tsrow);
return pRes->tsrow;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
int32_t num = 0;
for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pCmd->nAggTimeInterval > 0) {
if (i == 0 && pQueryInfo->nAggTimeInterval > 0) {
continue;
}
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
if (isNull(pRes->tsrow[i], pField->type)) {
pRes->tsrow[i] = NULL;
} else if (pField->type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer, with one extra byte for the terminating symbol
if (pRes->buffer[num] == NULL) {
pRes->buffer[num] = malloc(pField->bytes + 1);
} else {
pRes->buffer[num] = realloc(pRes->buffer[num], pField->bytes + 1);
pRes->buffer[num] = malloc(pField->bytes + TSDB_NCHAR_SIZE);
}
/* string terminated */
memset(pRes->buffer[num], 0, pField->bytes + 1);
/* string terminated char for binary data*/
memset(pRes->buffer[num], 0, pField->bytes + TSDB_NCHAR_SIZE);
if (taosUcs4ToMbs(pRes->tsrow[i], pField->bytes, pRes->buffer[num])) {
pRes->tsrow[i] = pRes->buffer[num];
@ -404,21 +417,14 @@ static void **doSetResultRowData(SSqlObj *pSql) {
tscError("%p charset:%s to %s. val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow);
pRes->tsrow[i] = NULL;
}
num++;
}
}
assert(num <= pCmd->fieldsInfo.numOfOutputCols);
return pRes->tsrow;
}
static void **getOneRowFromBuf(SSqlObj *pSql) {
doSetResultRowData(pSql);
SSqlRes *pRes = &pSql->res;
pRes->row++;
assert(num <= pQueryInfo->fieldsInfo.numOfOutputCols);
pRes->row++; // index increase one-step
return pRes->tsrow;
}
@ -426,22 +432,29 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
bool hasData = true;
SSqlCmd *pCmd = &pSql->cmd;
if (tscProjectionQueryOnMetric(pCmd)) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
bool allSubqueryExhausted = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd;
SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfo(pCmd1, 0);
assert(pCmd1->numOfTables == 1);
SQueryInfo * pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex);
SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo1, 0);
assert(pQueryInfo1->numOfTables == 1);
/*
* if the global limitation is not reached, and current result has not exhausted, or next more vnodes are
* available, go on
* available, goes on
*/
if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows &&
(!tscHasReachLimitation(pSql->pSubs[i]))) {
(!tscHasReachLimitation(pQueryInfo1, pRes1))) {
allSubqueryExhausted = false;
break;
}
@ -450,12 +463,16 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
hasData = !allSubqueryExhausted;
} else { // otherwise, in case of inner join, if any subquery is exhausted, the query is completed.
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
if (pSql->pSubs[i] == 0) {
continue;
}
SSqlRes * pRes1 = &pSql->pSubs[i]->res;
SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pSql->pSubs[i]) &&
tscProjectionQueryOnTable(&pSql->pSubs[i]->cmd)) ||
if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) &&
tscProjectionQueryOnTable(pQueryInfo1)) ||
(pRes1->numOfRows == 0)) {
hasData = false;
break;
}
@ -465,57 +482,42 @@ static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
static void **tscBuildResFromSubqueries(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
while (1) {
if (!tscHashRemainDataInSubqueryResultSet(pSql)) { // free all sub sqlobj
tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
SSubqueryState *pState = NULL;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj * pChildObj = pSql->pSubs[i];
SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param;
pState = pSupporter->pState;
tscDestroyJoinSupporter(pChildObj->param);
taos_free_result(pChildObj);
}
free(pState);
return NULL;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
if (pRes->tsrow == NULL) {
pRes->tsrow = malloc(POINTER_BYTES * pCmd->exprsInfo.numOfExprs);
pRes->tsrow = calloc(pQueryInfo->exprsInfo.numOfExprs, POINTER_BYTES);
}
bool success = false;
if (pSql->numOfSubs >= 2) { // do merge result
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
SSqlRes *pRes2 = &pSql->pSubs[1]->res;
if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) {
doSetResultRowData(pSql->pSubs[0]);
doSetResultRowData(pSql->pSubs[1]);
int32_t numOfTableHasRes = 0;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] != 0) {
numOfTableHasRes++;
}
}
if (numOfTableHasRes >= 2) { // do merge result
success = (doSetResultRowData(pSql->pSubs[0]) != NULL) &&
(doSetResultRowData(pSql->pSubs[1]) != NULL);
// TSKEY key1 = *(TSKEY *)pRes1->tsrow[0];
// TSKEY key2 = *(TSKEY *)pRes2->tsrow[0];
// printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2);
success = true;
pRes1->row++;
pRes2->row++;
}
} else { // only one subquery
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
doSetResultRowData(pSql->pSubs[0]);
SSqlObj *pSub = pSql->pSubs[0];
if (pSub == NULL) {
pSub = pSql->pSubs[1];
}
success = (pRes1->row++ < pRes1->numOfRows);
success = (doSetResultRowData(pSub) != NULL);
}
if (success) { // current row of final output has been built, return to app
for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
int32_t tableIndex = pRes->pColumnIndex[i].tableIndex;
int32_t columnIndex = pRes->pColumnIndex[i].columnIndex;
@ -523,8 +525,32 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
pRes->tsrow[i] = pRes1->tsrow[columnIndex];
}
pRes->numOfTotalInCurrentClause++;
break;
} else { // continue retrieve data from vnode
if (!tscHashRemainDataInSubqueryResultSet(pSql)) {
tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
SSubqueryState *pState = NULL;
// free all sub sqlobj
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj *pChildObj = pSql->pSubs[i];
if (pChildObj == NULL) {
continue;
}
SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param;
pState = pSupporter->pState;
tscDestroyJoinSupporter(pChildObj->param);
taos_free_result(pChildObj);
}
free(pState);
return NULL;
}
tscFetchDatablockFromSubquery(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
return NULL;
@ -549,81 +575,72 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) {
if (pRes->code == TSDB_CODE_SUCCESS) {
tscTrace("%p data from all subqueries have been retrieved to client", pSql);
return tscJoinResultsetFromBuf(pSql);
return tscBuildResFromSubqueries(pSql);
} else {
tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code);
return NULL;
}
} else if (pRes->row >= pRes->numOfRows) {
/**
* NOT a join query
*
* If the data block of the current result set has been consumed already, try to fetch the next result
* data block from the virtual node.
*/
tscResetForNextRetrieve(pRes);
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
tscProcessSql(pSql); // retrieve data from virtual node
// if retrieving data from the current virtual node failed, try the next one if it exists
if (hasMoreVnodesToTry(pSql)) {
tscTryQueryNextVnode(pSql, NULL);
}
/*
* the local reducer has handled this case,
* so there is no need to add pRes->numOfRows for a super table query
*/
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
if (pRes->numOfRows == 0) {
return NULL;
}
// local reducer has handled this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
}
}
return getOneRowFromBuf(pSql);
return doSetResultRowData(pSql);
}
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) {
globalCode = TSDB_CODE_DISCONNECTED;
return NULL;
}
// projection query on metric, pipeline retrieve data from vnode list, instead of two-stage merge
/*
* projection query on a super table: access each virtual node sequentially to retrieve data from the vnode list,
* instead of two-stage merge
*/
TAOS_ROW rows = taos_fetch_row_impl(res);
while (rows == NULL && tscProjectionQueryOnMetric(pCmd)) {
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (rows != NULL) {
return rows;
}
// reach the maximum number of output rows, abort
if (tscHasReachLimitation(pSql)) {
return NULL;
}
/*
* update the limit and offset value according to current retrieval results
* Note: if pRes->offset > 0, pRes->numOfRows = 0, pRes->numOfTotal = 0;
*/
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
/*
* For a projection query with super table join, numOfSubs equals the total number of subqueries, so
* we need to reset the value of numOfSubs to 0.
*
* For a super table join with projection query, if any one of the subqueries is exhausted, the query is completed.
*/
pSql->numOfSubs = 0;
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pCmd->command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
rows = taos_fetch_row_impl(res);
}
// check!!!
if (rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
tscTryQueryNextClause(pSql, NULL);
// if the rows is not NULL, return immediately
rows = taos_fetch_row_impl(res);
}
return rows;
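For context, the client-side consumption loop that this subclause-switching logic serves looks roughly like the sketch below. It only uses functions that appear in this file (taos_fetch_row, taos_num_fields, taos_fetch_fields, taos_print_row); the result handle and buffer size are assumptions for illustration:

#include <stdio.h>
#include "taos.h"

/* minimal consumption sketch, assuming `res` came from a successful query */
static void dumpResult(TAOS_RES *res) {
  int         numOfFields = taos_num_fields(res);
  TAOS_FIELD *fields      = taos_fetch_fields(res);

  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {  // drives the vnode/subclause switching above
    char line[1024] = {0};
    taos_print_row(line, row, fields, numOfFields);
    printf("%s\n", line);
  }
}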
@ -645,36 +662,34 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
// projection query on metric: pipeline retrieval of data from the vnode list,
// instead of a two-stage merge; vnodeProcessMsgFromShell frees the qhandle
nRows = taos_fetch_block_impl(res, rows);
while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) {
/* reach the maximum number of output rows, abort */
if (tscHasReachLimitation(pSql)) {
return 0;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
pSql->cmd.command = pQueryInfo->command;
pCmd->clauseIndex++;
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
}
pRes->numOfTotal += pRes->numOfTotalInCurrentClause;
pRes->numOfTotalInCurrentClause = 0;
pRes->rspType = 0;
// check!!!
if (*rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
pSql->numOfSubs = 0;
tfree(pSql->pSubs);
assert(pSql->fp == NULL);
tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
}
return nRows;
}
int taos_select_db(TAOS *taos, const char *db) {
char sql[64];
char sql[256] = {0};
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@ -682,8 +697,7 @@ int taos_select_db(TAOS *taos, const char *db) {
return TSDB_CODE_DISCONNECTED;
}
sprintf(sql, "use %s", db);
snprintf(sql, tListLen(sql), "use %s", db);
return taos_query(taos, sql);
}
@ -714,9 +728,15 @@ void taos_free_result_imp(TAOS_RES* res, int keepCmd) {
}
// set freeFlag to 1 in retrieve message if there are un-retrieved results
pCmd->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscFreeSqlObjPartial(pSql);
return;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
/*
* case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet.
@ -800,26 +820,47 @@ int taos_errno(TAOS *taos) {
return code;
}
static bool validErrorCode(int32_t code) {
return code >= TSDB_CODE_SUCCESS && code < TSDB_CODE_MAX_ERROR_CODE;
}
/*
* In case of invalid sql error, additional information is attached to explain
* why the sql is invalid
*/
static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd* pCmd) {
if (code != TSDB_CODE_INVALID_SQL) {
return false;
}
size_t len = strlen(pCmd->payload);
char* z = NULL;
if (len > 0) {
z = strstr (pCmd->payload, "invalid SQL");
}
return z != NULL;
}
char *taos_errstr(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
uint8_t code;
if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode];
if ((int8_t)(pObj->pSql->res.code) == -1)
code = TSDB_CODE_OTHERS;
else
code = pObj->pSql->res.code;
// for invalid sql, additional information is attached to explain why the sql is invalid
if (code == TSDB_CODE_INVALID_SQL) {
return pObj->pSql->cmd.payload;
SSqlObj* pSql = pObj->pSql;
if (validErrorCode(pSql->res.code)) {
code = pSql->res.code;
} else {
if (code < 0 || code > TSDB_CODE_MAX_ERROR_CODE) {
return tsError[TSDB_CODE_SUCCESS];
} else {
return tsError[code];
}
code = TSDB_CODE_OTHERS; //unknown error
}
if (hasAdditionalErrorInfo(code, &pSql->cmd)) {
return pSql->cmd.payload;
} else {
return tsError[code];
}
}
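A brief usage sketch of the revised error reporting: the application checks the return code and prints taos_errstr, which now returns the detailed parser message only when the payload really carries an "invalid SQL" explanation, and a generic message otherwise. The SQL text is an arbitrary example, and taos_query is assumed to return an int status in this version of the client:

#include <stdio.h>
#include "taos.h"

/* minimal sketch; `taos` is assumed to be a connected handle */
static void runAndReport(TAOS *taos) {
  if (taos_query(taos, "select avg(speed) from meters interval(10s)") != 0) {
    // detailed message for invalid SQL, generic text for every other error code
    printf("query failed, errno:%d, msg:%s\n", taos_errno(taos), taos_errstr(taos));
  }
}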
@ -842,12 +883,15 @@ void taos_stop_query(TAOS_RES *res) {
if (res == NULL) return;
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
if (pSql->signature != pSql) return;
tscTrace("%p start to cancel query", res);
pSql->res.code = TSDB_CODE_QUERY_CANCELLED;
if (tscIsTwoStageMergeMetricQuery(&pSql->cmd)) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) {
tscKillMetricQuery(pSql);
return;
}
@ -898,14 +942,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
len += sprintf(str + len, "%f", fv);
}
break;
} break;
case TSDB_DATA_TYPE_DOUBLE:{
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
len += sprintf(str + len, "%lf", dv);
}
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
@ -916,8 +959,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
str[len++] = c;
}
str[len] = 0;
}
break;
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
@ -945,6 +987,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
@ -967,11 +1010,11 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pSql->asyncTblPos = NULL;
if (NULL != pSql->pTableHashList) {
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
}
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false);
pRes->code = (uint8_t)tsParseSql(pSql, false);
int code = pRes->code;
tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
@ -982,7 +1025,6 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
tscCleanSqlCmd(&pSql->cmd);
SSqlCmd *pCmd = &pSql->cmd;
@ -993,7 +1035,10 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
int code = TSDB_CODE_INVALID_METER_ID;
char *str = (char *)tblNameList;
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd);
SQueryInfo *pQueryInfo = NULL;
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo);
if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) {
return code;
@ -1028,7 +1073,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}
if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) {
if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
return code;
}
@ -1073,6 +1118,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
SSqlRes *pRes = &pSql->res;
pRes->numOfTotal = 0; // the number of getting table meta from server
pRes->numOfTotalInCurrentClause = 0;
pRes->code = 0;
assert(pSql->fp == NULL);

View File

@ -31,9 +31,13 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql);
static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer);
static bool isProjectStream(SSqlCmd *pCmd) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t launchDelay) {
return taosGetTimestamp(pStream->precision) + launchDelay - pStream->stime - 1;
}
static bool isProjectStream(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId != TSDB_FUNC_PRJ) {
return false;
}
@ -66,27 +70,29 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
pSql->fp = tscProcessStreamQueryCallback;
pSql->param = pStream;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
int code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
int code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pSql->res.code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code == 0 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql);
if (code == 0 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql, 0);
pSql->res.code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
tscTansformSQLFunctionForMetricQuery(&pSql->cmd);
tscTansformSQLFunctionForSTableQuery(pQueryInfo);
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
return;
}
@ -105,22 +111,23 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pStream->numOfRes = 0; // reset the numOfRes.
SSqlObj *pSql = pStream->pSql;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
tscTrace("%p add into timer", pSql);
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
/*
* pSql->cmd.etime, which is the start time, does not change in case of
* pQueryInfo->etime, which is the start time, does not change in case of
* a repeated first execution, once the first execution has failed.
*/
pSql->cmd.stime = pStream->stime; // start time
pQueryInfo->stime = pStream->stime; // start time
pSql->cmd.etime = taosGetTimestamp(pStream->precision); // end time
if (pSql->cmd.etime > pStream->etime) {
pSql->cmd.etime = pStream->etime;
pQueryInfo->etime = taosGetTimestamp(pStream->precision); // end time
if (pQueryInfo->etime > pStream->etime) {
pQueryInfo->etime = pStream->etime;
}
} else {
pSql->cmd.stime = pStream->stime - pStream->interval;
pSql->cmd.etime = pStream->stime - 1;
pQueryInfo->stime = pStream->stime - pStream->interval;
pQueryInfo->etime = pStream->stime - 1;
}
// launch stream computing in a new thread
@ -139,9 +146,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0, 0);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
return;
}
@ -165,24 +172,25 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) {
SSqlStream * pStream = (SSqlStream *)param;
SSqlObj * pSql = (SSqlObj *)res;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
return;
}
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
pStream->numOfRes += numOfRows;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
for(int32_t i = 0; i < numOfRows; ++i) {
TAOS_ROW row = taos_fetch_row(res);
tscTrace("%p stream:%p fetch result", pSql, pStream);
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
pStream->stime = *(TSKEY *)row[0];
} else {
tscSetTimestampForRes(pStream, pSql);
@ -197,9 +205,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
} else { // numOfRows == 0, all data has been retrieved
pStream->useconds += pSql->res.useconds;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pStream->numOfRes == 0) {
if (pSql->cmd.interpoType == TSDB_INTERPO_SET_VALUE || pSql->cmd.interpoType == TSDB_INTERPO_NULL) {
SSqlCmd *pCmd = &pSql->cmd;
if (pQueryInfo->interpoType == TSDB_INTERPO_SET_VALUE || pQueryInfo->interpoType == TSDB_INTERPO_NULL) {
SSqlRes *pRes = &pSql->res;
/* failed to retrieve any result in this retrieve */
@ -209,12 +218,12 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
void *oldPtr = pSql->res.data;
pSql->res.data = tmpRes;
for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
for (int32_t i = 1; i < pSql->cmd.fieldsInfo.numOfOutputCols; ++i) {
int16_t offset = tscFieldInfoGetOffset(pCmd, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
assignVal(pSql->res.data + offset, (char *)(&pCmd->defaultVal[i]), pField->bytes, pField->type);
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->defaultVal[i]), pField->bytes, pField->type);
row[i] = pSql->res.data + offset;
}
@ -222,7 +231,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
row[0] = pRes->data;
// char result[512] = {0};
// taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols);
// taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutputCols);
// tscPrint("%p stream:%p query result: %s", pSql, pStream, result);
tscTrace("%p stream:%p fetch result", pSql, pStream);
@ -231,18 +240,19 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pRes->numOfRows = 0;
pRes->data = oldPtr;
} else if (isProjectStream(&pSql->cmd)) {
} else if (isProjectStream(pQueryInfo)) {
/* no results in the query range, retry */
// todo set retry dynamic time
int32_t retry = tsProjectExecInterval;
tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry);
tscClearSqlMetaInfoForce(&(pStream->pSql->cmd));
tscSetRetryTimer(pStream, pStream->pSql, retry);
return;
}
} else {
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
pStream->stime += 1;
}
}
@ -257,7 +267,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) {
if (isProjectStream(&pSql->cmd)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer);
if (isProjectStream(pQueryInfo)) {
int64_t now = taosGetTimestamp(pStream->precision);
int64_t etime = now > pStream->etime ? pStream->etime : now;
@ -275,12 +288,12 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
taos_close_stream(pStream);
return;
}
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
now + timer, timer, pStream->stime, etime);
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1);
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@ -289,10 +302,34 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer);
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (remainTimeWindow / 1.5);
}
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
assert(currentDelay < pStream->slidingTime);
return currentDelay;
}
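A numeric walk-through of getLaunchTimeDelay with invented values (sliding window 10000, delay ratio 0.1, max delay 20000; the real defaults and time units depend on configuration):

#include <stdint.h>
#include <stdlib.h>

/* illustrative only: mirrors the computation above with made-up numbers */
static int64_t exampleLaunchDelay(void) {
  int64_t slidingTime = 10000;
  int64_t delayDelta  = (int64_t)(slidingTime * 0.1);   // 1000
  int64_t maxDelay    = 20000;                          // assumed max stream compute delay

  if (delayDelta > maxDelay) {
    delayDelta = maxDelay;                              // not hit with these numbers
  }

  int64_t remainTimeWindow = slidingTime - delayDelta;  // 9000
  if (maxDelay > remainTimeWindow) {
    maxDelay = (int64_t)(remainTimeWindow / 1.5);       // 6000
  }

  return (rand() % maxDelay) + delayDelta;              // in [1000, 6999], always < slidingTime
}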
static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
int64_t timer = 0;
if (isProjectStream(&pSql->cmd)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (isProjectStream(pQueryInfo)) {
/*
* for a projection query, no matter whether data is fetched successfully or not, the next launch is issued
* at least one sliding time window later
@ -322,24 +359,15 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
taos_close_stream(pStream);
return;
}
timer = pStream->stime - taosGetTimestamp(pStream->precision);
if (timer < 0) {
timer = 0;
}
}
int64_t delayDelta = (int64_t)(pStream->slidingTime * 0.1);
delayDelta = (rand() % delayDelta);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
timer += delayDelta; // a random number
timer += getLaunchTimeDelay(pStream);
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer = timer / 1000L;
}
@ -348,56 +376,57 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
}
static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SSqlCmd *pCmd = &pSql->cmd;
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
if (pCmd->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pCmd->nAggTimeInterval, minIntervalTime);
pCmd->nAggTimeInterval = minIntervalTime;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
pQueryInfo->nAggTimeInterval, minIntervalTime);
pQueryInfo->nAggTimeInterval = minIntervalTime;
}
pStream->interval = pCmd->nAggTimeInterval; // it shall be derived from sql string
pStream->interval = pQueryInfo->nAggTimeInterval; // it shall be derived from sql string
if (pCmd->nSlidingTime == 0) {
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
if (pQueryInfo->nSlidingTime == 0) {
pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pCmd->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime,
minSlidingTime);
if (pQueryInfo->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream,
pQueryInfo->nSlidingTime, minSlidingTime);
pCmd->nSlidingTime = minSlidingTime;
pQueryInfo->nSlidingTime = minSlidingTime;
}
if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) {
if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
pCmd->nSlidingTime, pCmd->nAggTimeInterval);
pQueryInfo->nSlidingTime, pQueryInfo->nAggTimeInterval);
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval;
}
pStream->slidingTime = pCmd->nSlidingTime;
pCmd->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor
pStream->slidingTime = pQueryInfo->nSlidingTime;
pQueryInfo->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor
}
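To make the normalization above concrete, here is a condensed sketch of the four rules with invented thresholds (the actual tsMinIntervalTime/tsMinSlidingTime defaults are configuration-dependent); normalizeWindow is a hypothetical helper:

#include <stdint.h>

/* sketch of the interval/sliding normalization rules; thresholds are illustrative */
static void normalizeWindow(int64_t *interval, int64_t *sliding,
                            int64_t minInterval, int64_t minSliding) {
  if (*interval < minInterval) *interval = minInterval;  // interval has a lower bound
  if (*sliding == 0)           *sliding  = *interval;    // sliding defaults to the interval
  if (*sliding < minSliding)   *sliding  = minSliding;   // sliding has a lower bound
  if (*sliding > *interval)    *sliding  = *interval;    // sliding never exceeds the interval
}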
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
SSqlCmd *pCmd = &pSql->cmd;
if (isProjectStream(pCmd)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (isProjectStream(pQueryInfo)) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->interval = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
assert(stime >= pCmd->stime);
assert(stime >= pQueryInfo->stime);
stime += 1; // exclude the last records from table
} else {
stime = pCmd->stime;
stime = pQueryInfo->stime;
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
@ -419,24 +448,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
int64_t timer = pStream->stime - taosGetTimestamp(pStream->precision);
if (timer < 0) timer = 0;
int64_t delayDelta = (int64_t)(pStream->interval * 0.1);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t startDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
srand(time(NULL));
timer += (rand() % delayDelta); // a random number
if (timer < startDelay || timer > maxDelay) {
timer = (timer % startDelay) + startDelay;
}
timer += getLaunchTimeDelay(pStream);
timer += startDelay;
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
}
@ -499,8 +516,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
return NULL;
}
// TODO: refactor later to use an enum
pSql->cmd.count = 1; // 1 means sql in stream, allowed the sliding clause.
pSql->cmd.inStream = 1; // 1 means sql in stream, allowed the sliding clause.
pRes->code = tscToSQLCmd(pSql, &SQLInfo);
SQLInfoDestroy(&SQLInfo);
@ -521,7 +537,8 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
return NULL;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
pStream->fp = fp;
pStream->callback = callback;
@ -530,7 +547,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
pStream->precision = pMeterMetaInfo->pMeterMeta->precision;
pStream->ctime = taosGetTimestamp(pStream->precision);
pStream->etime = pCmd->etime;
pStream->etime = pQueryInfo->etime;
pSql->pStream = pStream;
tscAddIntoStreamList(pStream);

View File

@ -26,6 +26,7 @@
#include "tutil.h"
#include "tscUtil.h"
#include "tcache.h"
#include "tscProfile.h"
typedef struct SSubscriptionProgress {
int64_t uid;
@ -162,7 +163,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
int code = (uint8_t)tsParseSql(pSub->pSql, pObj->acctId, pObj->db, false);
int code = (uint8_t)tsParseSql(pSub->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
tscError("failed to parse sql statement: %s", pSub->topic);
return 0;
@ -174,7 +175,7 @@ int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
return 0;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0, 0);
int numOfMeters = 0;
if (!UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta;
@ -374,16 +375,18 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
tscTrace("meter synchronization completed");
} else {
uint16_t type = pSql->cmd.type;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
uint16_t type = pQueryInfo->type;
taos_free_result_imp(pSql, 1);
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->qhandle = 0;
pSql->thandle = NULL;
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->cmd.type = type;
pQueryInfo->type = type;
tscGetMeterMetaInfo(&pSql->cmd, 0)->vnodeIndex = 0;
tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->vnodeIndex = 0;
}
tscDoQuery(pSql);

File diff suppressed because it is too large

src/inc/hash.h Normal file
View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_HASH_H
#define TDENGINE_HASH_H
#include "hashutil.h"
#define HASH_MAX_CAPACITY (1024 * 1024 * 16)
#define HASH_VALUE_IN_TRASH (-1)
#define HASH_DEFAULT_LOAD_FACTOR (0.75)
#define HASH_INDEX(v, c) ((v) & ((c)-1))
typedef struct SHashNode {
char *key; // null-terminated string
union {
struct SHashNode * prev;
struct SHashEntry *prev1;
};
struct SHashNode *next;
uint32_t hashVal; // the hash value of key, if hashVal == HASH_VALUE_IN_TRASH, this node is moved to trash
uint32_t keyLen; // length of the key
char data[];
} SHashNode;
typedef struct SHashEntry {
SHashNode *next;
uint32_t num;
} SHashEntry;
typedef struct HashObj {
SHashEntry **hashList;
uint32_t capacity;
int size;
_hash_fn_t hashFp;
bool multithreadSafe; // enable lock
#if defined LINUX
pthread_rwlock_t lock;
#else
pthread_mutex_t lock;
#endif
} HashObj;
void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe);
int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size);
void taosDeleteFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen);
char *taosGetDataFromHash(HashObj *pObj, const char *key, uint32_t keyLen);
void taosCleanUpHashTable(void *handle);
int32_t taosGetHashMaxOverflowLength(HashObj *pObj);
int32_t taosCheckHashTable(HashObj *pObj);
#endif // TDENGINE_HASH_H
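The header above only declares the API, so a short usage sketch may help. It assumes the table copies the value bytes (suggested by the size argument) and uses MurmurHash3_32 from hashutil.h as the hash function; the capacity, key and value are arbitrary:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "hash.h"

static void exampleHashUsage(void) {
  // capacity 128, string hash, no multithread protection (all illustrative choices)
  HashObj *pHash = (HashObj *)taosInitHashTable(128, MurmurHash3_32, false);

  int32_t     vnodeId = 5;
  const char *key     = "db.meter_1001";

  // the value bytes are copied into the hash node
  taosAddToHashTable(pHash, key, (uint32_t)strlen(key), &vnodeId, sizeof(vnodeId));

  int32_t *pVal = (int32_t *)taosGetDataFromHash(pHash, key, (uint32_t)strlen(key));
  if (pVal != NULL && *pVal == 5) {
    // found the entry that was just inserted
  }

  taosDeleteFromHashTable(pHash, key, (uint32_t)strlen(key));
  taosCleanUpHashTable(pHash);
}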

src/inc/hashutil.h Normal file
View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_HASHUTIL_H
#define TDENGINE_HASHUTIL_H
#include "os.h"
typedef uint32_t (*_hash_fn_t)(const char *, uint32_t);
/**
* murmur hash algorithm
* @key usually string
* @len key length
* @out an int32 hash value
*/
uint32_t MurmurHash3_32(const char *key, uint32_t len);
/**
*
* @param key
* @param len
* @return
*/
uint32_t taosIntHash_32(const char *key, uint32_t len);
uint32_t taosIntHash_64(const char *key, uint32_t len);
_hash_fn_t taosGetDefaultHashFunction(int32_t type);
#endif //TDENGINE_HASHUTIL_H
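One detail worth noting: HASH_INDEX in hash.h maps a hash value to a slot with a bitwise AND, which only behaves like a modulo when the capacity is a power of two. A tiny sketch with an arbitrary key and capacity:

#include <string.h>
#include "hash.h"   // also brings in hashutil.h

static uint32_t exampleSlot(void) {
  const char *key      = "meter_1001";
  uint32_t    capacity = 1024;                              // must be a power of two
  uint32_t    hv       = MurmurHash3_32(key, (uint32_t)strlen(key));
  return HASH_INDEX(hv, capacity);                          // hv & (capacity - 1)
}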

View File

@ -31,7 +31,7 @@
}
%syntax_error {
pInfo->validSql = false;
pInfo->valid = false;
int32_t outputBufLen = tListLen(pInfo->pzErrMsg);
int32_t len = 0;
@ -59,25 +59,25 @@
program ::= cmd. {}
//////////////////////////////////THE SHOW STATEMENT///////////////////////////////////////////
cmd ::= SHOW DATABASES. { setDCLSQLElems(pInfo, SHOW_DATABASES, 0);}
cmd ::= SHOW MNODES. { setDCLSQLElems(pInfo, SHOW_MNODES, 0);}
cmd ::= SHOW DNODES. { setDCLSQLElems(pInfo, SHOW_DNODES, 0);}
cmd ::= SHOW ACCOUNTS. { setDCLSQLElems(pInfo, SHOW_ACCOUNTS, 0);}
cmd ::= SHOW USERS. { setDCLSQLElems(pInfo, SHOW_USERS, 0);}
cmd ::= SHOW DATABASES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);}
cmd ::= SHOW MNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);}
cmd ::= SHOW DNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);}
cmd ::= SHOW ACCOUNTS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);}
cmd ::= SHOW USERS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);}
cmd ::= SHOW MODULES. { setDCLSQLElems(pInfo, SHOW_MODULES, 0); }
cmd ::= SHOW QUERIES. { setDCLSQLElems(pInfo, SHOW_QUERIES, 0); }
cmd ::= SHOW CONNECTIONS.{ setDCLSQLElems(pInfo, SHOW_CONNECTIONS, 0);}
cmd ::= SHOW STREAMS. { setDCLSQLElems(pInfo, SHOW_STREAMS, 0); }
cmd ::= SHOW CONFIGS. { setDCLSQLElems(pInfo, SHOW_CONFIGS, 0); }
cmd ::= SHOW SCORES. { setDCLSQLElems(pInfo, SHOW_SCORES, 0); }
cmd ::= SHOW GRANTS. { setDCLSQLElems(pInfo, SHOW_GRANTS, 0); }
cmd ::= SHOW MODULES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); }
cmd ::= SHOW QUERIES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); }
cmd ::= SHOW CONNECTIONS.{ setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);}
cmd ::= SHOW STREAMS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); }
cmd ::= SHOW CONFIGS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONFIGS, 0, 0); }
cmd ::= SHOW SCORES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); }
cmd ::= SHOW GRANTS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); }
cmd ::= SHOW VNODES. { setDCLSQLElems(pInfo, SHOW_VNODES, 0); }
cmd ::= SHOW VNODES IPTOKEN(X). { setDCLSQLElems(pInfo, SHOW_VNODES, 1, &X); }
cmd ::= SHOW VNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); }
cmd ::= SHOW VNODES IPTOKEN(X). { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &X, 0); }
%type dbPrefix {SSQLToken}
dbPrefix(A) ::=. {A.n = 0;}
dbPrefix(A) ::=. {A.n = 0; A.type = 0;}
dbPrefix(A) ::= ids(X) DOT. {A = X; }
%type cpxName {SSQLToken}
@ -85,66 +85,66 @@ cpxName(A) ::= . {A.n = 0; }
cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; }
cmd ::= SHOW dbPrefix(X) TABLES. {
setDCLSQLElems(pInfo, SHOW_TABLES, 1, &X);
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, 0);
}
cmd ::= SHOW dbPrefix(X) TABLES LIKE ids(Y). {
setDCLSQLElems(pInfo, SHOW_TABLES, 2, &X, &Y);
setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &X, &Y);
}
cmd ::= SHOW dbPrefix(X) STABLES. {
setDCLSQLElems(pInfo, SHOW_STABLES, 1, &X);
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &X, 0);
}
cmd ::= SHOW dbPrefix(X) STABLES LIKE ids(Y). {
SSQLToken token;
setDBName(&token, &X);
setDCLSQLElems(pInfo, SHOW_STABLES, 2, &token, &Y);
setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &Y);
}
cmd ::= SHOW dbPrefix(X) VGROUPS. {
SSQLToken token;
setDBName(&token, &X);
setDCLSQLElems(pInfo, SHOW_VGROUPS, 1, &token);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
}
cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
SSQLToken token;
setDBName(&token, &X);
setDCLSQLElems(pInfo, SHOW_VGROUPS, 2, &token, &Y);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
}
//drop configure for tables
cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
X.n += Z.n;
setDCLSQLElems(pInfo, DROP_TABLE, 2, &X, &Y);
setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &X, &Y);
}
cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDCLSQLElems(pInfo, DROP_DATABASE, 2, &X, &Y); }
cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, DROP_DNODE, 1, &X); }
cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, DROP_USER, 1, &X); }
cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, DROP_ACCOUNT, 1, &X); }
cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y); }
cmd ::= DROP DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); }
cmd ::= DROP USER ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); }
cmd ::= DROP ACCOUNT ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &X); }
/////////////////////////////////THE USE STATEMENT//////////////////////////////////////////
cmd ::= USE ids(X). { setDCLSQLElems(pInfo, USE_DATABASE, 1, &X);}
cmd ::= USE ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_USE_DB, 1, &X);}
/////////////////////////////////THE DESCRIBE STATEMENT/////////////////////////////////////
cmd ::= DESCRIBE ids(X) cpxName(Y). {
X.n += Y.n;
setDCLSQLElems(pInfo, DESCRIBE_TABLE, 1, &X);
setDCLSQLElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X);
}
/////////////////////////////////THE ALTER STATEMENT////////////////////////////////////////
cmd ::= ALTER USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PASSWD, 2, &X, &Y); }
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setDCLSQLElems(pInfo, ALTER_USER_PRIVILEGES, 2, &X, &Y);}
cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_DNODE, 2, &X, &Y); }
cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, ALTER_DNODE, 3, &X, &Y, &Z); }
cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, ALTER_LOCAL, 1, &X); }
cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, ALTER_LOCAL, 2, &X, &Y); }
cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, ALTER_DATABASE, &X, &Y, &t);}
cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); }
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);}
cmd ::= ALTER DNODE IPTOKEN(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &X, &Y); }
cmd ::= ALTER DNODE IPTOKEN(X) ids(Y) ids(Z). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &X, &Y, &Z); }
cmd ::= ALTER LOCAL ids(X). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &X); }
cmd ::= ALTER LOCAL ids(X) ids(Y). { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &X, &Y); }
cmd ::= ALTER DATABASE ids(X) alter_db_optr(Y). { SSQLToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &X, &Y, &t);}
cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { SSQLToken t = {0}; setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &t, &Z);}
cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, ALTER_ACCT, &X, &Y, &Z);}
cmd ::= ALTER ACCOUNT ids(X) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, NULL, &Z);}
cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &X, &Y, &Z);}
// An IDENTIFIER can be a generic identifier, or one of several keywords.
// Any non-standard keyword can also be an identifier.
@ -163,11 +163,11 @@ ifnotexists(X) ::= . {X.n = 0;}
/////////////////////////////////THE CREATE STATEMENT///////////////////////////////////////
//create option for dnode/db/user/account
cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, CREATE_DNODE, 1, &X);}
cmd ::= CREATE DNODE IPTOKEN(X). { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &X);}
cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z).
{ setCreateAcctSQL(pInfo, CREATE_ACCOUNT, &X, &Y, &Z);}
cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, CREATE_DATABASE, &X, &Y, &Z);}
cmd ::= CREATE USER ids(X) PASS ids(Y). { setDCLSQLElems(pInfo, CREATE_USER, 2, &X, &Y);}
{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);}
cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);}
cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSQL(pInfo, &X, &Y);}
pps(Y) ::= . {Y.n = 0; }
pps(Y) ::= PPS INTEGER(X). {Y = X; }
@ -198,14 +198,14 @@ state(Y) ::= STATE ids(X). {Y = X; }
%type acct_optr {SCreateAcctSQL}
acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K) conns(L) state(M). {
Y.users = (K.n>0)?atoi(K.z):-1;
Y.dbs = (E.n>0)?atoi(E.z):-1;
Y.tseries = (D.n>0)?atoi(D.z):-1;
Y.streams = (F.n>0)?atoi(F.z):-1;
Y.pps = (C.n>0)?atoi(C.z):-1;
Y.storage = (P.n>0)?strtoll(P.z, NULL, 10):-1;
Y.qtime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1;
Y.conns = (L.n>0)?atoi(L.z):-1;
Y.maxUsers = (K.n>0)?atoi(K.z):-1;
Y.maxDbs = (E.n>0)?atoi(E.z):-1;
Y.maxTimeSeries = (D.n>0)?atoi(D.z):-1;
Y.maxStreams = (F.n>0)?atoi(F.z):-1;
Y.maxPointsPerSecond = (C.n>0)?atoi(C.z):-1;
Y.maxStorage = (P.n>0)?strtoll(P.z, NULL, 10):-1;
Y.maxQueryTime = (Q.n>0)?strtoll(Q.z, NULL, 10):-1;
Y.maxConnections = (L.n>0)?atoi(L.z):-1;
Y.stat = M;
}
@ -270,29 +270,29 @@ cmd ::= CREATE TABLE ifnotexists(Y) ids(X) cpxName(Z) create_table_args. {
%type create_table_args{SCreateTableSQL*}
create_table_args(A) ::= LP columnlist(X) RP. {
A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METER);
setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METER);
A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE);
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
}
// create metric
// create super table
create_table_args(A) ::= LP columnlist(X) RP TAGS LP columnlist(Y) RP. {
A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_NORMAL_METRIC);
setSQLInfo(pInfo, A, NULL, TSQL_CREATE_NORMAL_METRIC);
A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_STABLE);
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
}
// create meter by using metric
// create meter meter_name using metric_name tags(tag_values1, tag_values2)
// create table by using super table
// create table table_name using super_table_name tags(tag_values1, tag_values2)
create_table_args(A) ::= USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. {
X.n += F.n;
A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_METER_FROM_METRIC);
setSQLInfo(pInfo, A, NULL, TSQL_CREATE_METER_FROM_METRIC);
A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_TABLE_FROM_STABLE);
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
}
// create stream
// create table table_name as select count(*) from metric_name interval(time)
// create table table_name as select count(*) from super_table_name interval(time)
create_table_args(A) ::= AS select(S). {
A = tSetCreateSQLElems(NULL, NULL, NULL, NULL, S, TSQL_CREATE_STREAM);
setSQLInfo(pInfo, A, NULL, TSQL_CREATE_STREAM);
setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE);
}
%type column{TAOS_FIELD}
@ -349,16 +349,22 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
}
//////////////////////// The SELECT statement /////////////////////////////////
cmd ::= select(X). {
setSQLInfo(pInfo, X, NULL, TSQL_QUERY_METER);
}
%type select {SQuerySQL*}
%destructor select {destroyQuerySql($$);}
%destructor select {doDestroyQuerySql($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
A = tSetQuerySQLElems(&T, W, X, Y, P, Z, &K, &S, F, &L, &G);
}
%type union {SSubclauseInfo*}
%destructor union {destroyAllSelectClause($$);}
union(Y) ::= select(X). { Y = setSubclause(NULL, X); }
union(Y) ::= LP union(X) RP. { Y = X; }
union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); }
union(Y) ::= union(Z) UNION ALL LP select(X) RP. { Y = appendSelectClause(Z, X); }
cmd ::= union(X). { setSQLInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// Support for SQL expressions without FROM & WHERE clauses, e.g.,
// select current_database(),
// select server_version(), select client_version(),
@ -578,34 +584,14 @@ exprlist(A) ::= expritem(X). {A = tSQLExprListAppend(0,X,0);}
expritem(A) ::= expr(X). {A = X;}
expritem(A) ::= . {A = 0;}
////////////////////////// The INSERT command /////////////////////////////////
// add support "values() values() values() tags()" operation....
cmd ::= INSERT INTO cpxName(X) insert_value_list(K). {
tSetInsertSQLElems(pInfo, &X, K);
}
%type insert_value_list {tSQLExprListList*}
insert_value_list(X) ::= VALUES LP itemlist(Y) RP. {X = tSQLListListAppend(NULL, Y);}
insert_value_list(X) ::= insert_value_list(K) VALUES LP itemlist(Y) RP.
{X = tSQLListListAppend(K, Y);}
//cmd ::= INSERT INTO cpxName(X) select(S).
// {sqliteInsert(pParse, sqliteSrcListAppend(0,&X,&D), 0, S, F, R);}
%type itemlist {tSQLExprList*}
%destructor itemlist {tSQLExprListDestroy($$);}
itemlist(A) ::= itemlist(X) COMMA expr(Y). {A = tSQLExprListAppend(X,Y,0);}
itemlist(A) ::= expr(X). {A = tSQLExprListAppend(0,X,0);}
///////////////////////////////////reset query cache//////////////////////////////////////
cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, RESET_QUERY_CACHE, 0);}
cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
X.n += F.n;
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_ADD_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_ADD_COLUMN);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
@ -614,15 +600,15 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
toTSDBType(A.type);
tVariantList* K = tVariantListAppendToken(NULL, &A, -1);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, ALTER_TABLE_DROP_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_DROP_COLUMN);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
X.n += Y.n;
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, ALTER_TABLE_TAGS_ADD);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_ADD);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
X.n += Z.n;
@ -630,8 +616,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
toTSDBType(Y.type);
tVariantList* A = tVariantListAppendToken(NULL, &Y, -1);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_DROP);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_DROP);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
@ -643,8 +629,8 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
toTSDBType(Z.type);
A = tVariantListAppendToken(A, &Z, -1);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_CHG);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_CHG);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
@ -654,17 +640,18 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
tVariantList* A = tVariantListAppendToken(NULL, &Y, -1);
A = tVariantListAppend(A, &Z, -1);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, ALTER_TABLE_TAGS_SET);
setSQLInfo(pInfo, pAlterTable, NULL, ALTER_TABLE_TAGS_SET);
SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL);
setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
////////////////////////////////////////kill statement///////////////////////////////////////
cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setDCLSQLElems(pInfo, KILL_CONNECTION, 1, &X);}
cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_STREAM, 1, &X);}
cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setDCLSQLElems(pInfo, KILL_QUERY, 1, &X);}
cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);}
cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
LIKE MATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
COUNT SUM AVG MIN MAX FIRST LAST TOP BOTTOM STDDEV PERCENTILE APERCENTILE LEASTSQUARES HISTOGRAM DIFF
SPREAD TWA INTERP LAST_ROW NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT METRIC TBNAME JOIN METRICS STABLE NULL.
SPREAD TWA INTERP LAST_ROW RATE IRATE SUM_RATE SUM_IRATE AVG_RATE AVG_IRATE NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT
METRIC TBNAME JOIN METRICS STABLE NULL INSERT INTO VALUES.

View File

@ -125,7 +125,7 @@ extern "C" {
#define TSDB_CODE_BATCH_SIZE_TOO_BIG 104
#define TSDB_CODE_TIMESTAMP_OUT_OF_RANGE 105
#define TSDB_CODE_INVALID_QUERY_MSG 106 // failed to validate the sql expression msg by vnode
#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered
#define TSDB_CODE_SORTED_RES_TOO_MANY 107 // too many result for ordered super table projection query
#define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered
#define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed
#define TSDB_CODE_SERV_NO_DISKSPACE 110
@ -137,7 +137,7 @@ extern "C" {
#define TSDB_CODE_INVALID_VNODE_STATUS 116
#define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117
#define TSDB_CODE_TABLE_ID_MISMATCH 118
#define TSDB_CODE_QUERY_CACHE_ERASED 119
#define TSDB_CODE_QUERY_CACHE_ERASED 119
#define TSDB_CODE_MAX_ERROR_CODE 120

View File

@ -74,10 +74,10 @@ extern "C" {
#define TSDB_MSG_TYPE_CREATE_MNODE_RSP 44
#define TSDB_MSG_TYPE_DROP_MNODE 45
#define TSDB_MSG_TYPE_DROP_MNODE_RSP 46
#define TSDB_MSG_TYPE_CREATE_PNODE 47
#define TSDB_MSG_TYPE_CREATE_PNODE_RSP 48
#define TSDB_MSG_TYPE_DROP_PNODE 49
#define TSDB_MSG_TYPE_DROP_PNODE_RSP 50
#define TSDB_MSG_TYPE_CREATE_DNODE 47
#define TSDB_MSG_TYPE_CREATE_DNODE_RSP 48
#define TSDB_MSG_TYPE_DROP_DNODE 49
#define TSDB_MSG_TYPE_DROP_DNODE_RSP 50
#define TSDB_MSG_TYPE_CREATE_DB 51
#define TSDB_MSG_TYPE_CREATE_DB_RSP 52
#define TSDB_MSG_TYPE_DROP_DB 53
@ -147,7 +147,7 @@ enum _mgmt_table {
TSDB_MGMT_TABLE_USER,
TSDB_MGMT_TABLE_DB,
TSDB_MGMT_TABLE_TABLE,
TSDB_MGMT_TABLE_PNODE,
TSDB_MGMT_TABLE_DNODE,
TSDB_MGMT_TABLE_MNODE,
TSDB_MGMT_TABLE_VGROUP,
TSDB_MGMT_TABLE_METRIC,
@ -312,7 +312,7 @@ typedef struct {
typedef struct {
char db[TSDB_METER_ID_LEN];
short ignoreNotExists;
uint8_t ignoreNotExists;
} SDropDbMsg, SUseDbMsg;
typedef struct {
@ -507,7 +507,6 @@ typedef struct {
uint64_t uid;
TSKEY skey;
TSKEY ekey;
int32_t num;
int16_t order;
int16_t orderColId;
@ -516,7 +515,8 @@ typedef struct {
char intervalTimeUnit; // time interval unit, used for revising interval(1d)
int64_t nAggTimeInterval; // time interval for aggregation, in milliseconds
int64_t slidingTime; // value for sliding window
// tag schema, used to parse tag information in pSidExtInfo
uint64_t pTagSchema;

View File

@ -86,6 +86,26 @@ void taosCleanUpDataCache(void *handle);
*/
void taosClearDataCache(void *handle);
/**
* Add one reference count for the existing data and assign this data to a new owner.
* The new owner needs to invoke taosRemoveDataFromCache when it no longer needs this data.
* This procedure is a faster version of the taosGetDataFromCache function, and it avoids the side effect that the
* data may have been moved to the trash, in which case taosGetDataFromCache would fail to retrieve it again.
*
* @param handle
* @param data
* @return
*/
void* taosGetDataFromExists(void* handle, void* data);
/**
* Transfer the ownership of data in the cache to another object without increasing the reference count.
* @param handle
* @param data
* @return
*/
void* taosTransferDataInCache(void* handle, void** data);
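
A hypothetical sketch of how the two calls above could be combined. cacheOwnershipSketch and its parameters are illustrative, and the assumption that the source pointer is cleared through the double pointer is not confirmed by this header.

// illustrative only: take a reference on resident data, then hand it to another owner
static void cacheOwnershipSketch(void *cacheHandle, void *cachedData) {
  // add one reference to data already resident in the cache; the caller becomes a co-owner
  void *ref = taosGetDataFromExists(cacheHandle, cachedData);

  // transfer the reference to another owner without touching the reference count;
  // the source pointer is passed by address, presumably so it can be cleared
  void *newOwner = taosTransferDataInCache(cacheHandle, &ref);

  (void)newOwner;  // the final owner releases the data later, e.g. via taosRemoveDataFromCache
}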
#ifdef __cplusplus
}
#endif

View File

@ -127,6 +127,7 @@ extern int tsEnableMonitorModule;
extern int tsRestRowLimit;
extern int tsCompressMsgSize;
extern int tsMaxSQLStringLen;
extern int tsMaxNumOfOrderedResults;
extern char tsSocketType[4];
@ -136,6 +137,7 @@ extern int tsMinIntervalTime;
extern int tsMaxStreamComputDelay;
extern int tsStreamCompStartDelay;
extern int tsStreamCompRetryDelay;
extern float tsStreamComputDelayRatio; // the delayed computing ratio of the whole time window
extern int tsProjectExecInterval;
extern int64_t tsMaxRetentWindow;

View File

@ -190,6 +190,7 @@ extern "C" {
#define TSDB_MAX_TABLES_PER_VNODE 220000
#define TSDB_MAX_JOIN_TABLE_NUM 5
#define TSDB_MAX_UNION_CLAUSE 5
#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)
#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE)
@ -211,7 +212,7 @@ extern "C" {
#define TSDB_MAX_RPC_THREADS 5
#define TSDB_QUERY_TYPE_QUERY 0 // normal query
#define TSDB_QUERY_TYPE_NON_TYPE 0x00U // none type
#define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01U // free qhandle at vnode
/*
@ -227,6 +228,13 @@ extern "C" {
#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40U // select *,columns... query
#define TSDB_QUERY_TYPE_JOIN_SEC_STAGE 0x80U // join sub query at the second stage
#define TSDB_QUERY_TYPE_INSERT 0x100U // insert type
#define TSDB_QUERY_TYPE_IMPORT 0x200U // import data
#define TSDB_QUERY_HAS_TYPE(x, _type) (((x) & (_type)) != 0)
#define TSDB_QUERY_SET_TYPE(x, _type) ((x) |= (_type))
#define TSDB_QUERY_RESET_TYPE(x) ((x) = TSDB_QUERY_TYPE_NON_TYPE)
#define TSQL_SO_ASC 1
#define TSQL_SO_DESC 0
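
The TSDB_QUERY_TYPE_* values above are bit flags carried in a single mask. The following sketch of the helper macros is illustrative only; queryTypeFlagSketch and its local variable are hypothetical names.

#include <stdint.h>

static void queryTypeFlagSketch(void) {
  uint32_t type = TSDB_QUERY_TYPE_NON_TYPE;

  // mark this as a projection query running as the second stage of a join
  TSDB_QUERY_SET_TYPE(type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
  TSDB_QUERY_SET_TYPE(type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);

  if (TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
    // handle the second-stage join subquery
  }

  TSDB_QUERY_RESET_TYPE(type);  // clears the mask back to TSDB_QUERY_TYPE_NON_TYPE
}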

View File

@ -119,97 +119,106 @@
#define TK_COMMA 101
#define TK_NULL 102
#define TK_SELECT 103
#define TK_FROM 104
#define TK_VARIABLE 105
#define TK_INTERVAL 106
#define TK_FILL 107
#define TK_SLIDING 108
#define TK_ORDER 109
#define TK_BY 110
#define TK_ASC 111
#define TK_DESC 112
#define TK_GROUP 113
#define TK_HAVING 114
#define TK_LIMIT 115
#define TK_OFFSET 116
#define TK_SLIMIT 117
#define TK_SOFFSET 118
#define TK_WHERE 119
#define TK_NOW 120
#define TK_INSERT 121
#define TK_INTO 122
#define TK_VALUES 123
#define TK_RESET 124
#define TK_QUERY 125
#define TK_ADD 126
#define TK_COLUMN 127
#define TK_TAG 128
#define TK_CHANGE 129
#define TK_SET 130
#define TK_KILL 131
#define TK_CONNECTION 132
#define TK_COLON 133
#define TK_STREAM 134
#define TK_ABORT 135
#define TK_AFTER 136
#define TK_ATTACH 137
#define TK_BEFORE 138
#define TK_BEGIN 139
#define TK_CASCADE 140
#define TK_CLUSTER 141
#define TK_CONFLICT 142
#define TK_COPY 143
#define TK_DEFERRED 144
#define TK_DELIMITERS 145
#define TK_DETACH 146
#define TK_EACH 147
#define TK_END 148
#define TK_EXPLAIN 149
#define TK_FAIL 150
#define TK_FOR 151
#define TK_IGNORE 152
#define TK_IMMEDIATE 153
#define TK_INITIALLY 154
#define TK_INSTEAD 155
#define TK_MATCH 156
#define TK_KEY 157
#define TK_OF 158
#define TK_RAISE 159
#define TK_REPLACE 160
#define TK_RESTRICT 161
#define TK_ROW 162
#define TK_STATEMENT 163
#define TK_TRIGGER 164
#define TK_VIEW 165
#define TK_ALL 166
#define TK_COUNT 167
#define TK_SUM 168
#define TK_AVG 169
#define TK_MIN 170
#define TK_MAX 171
#define TK_FIRST 172
#define TK_LAST 173
#define TK_TOP 174
#define TK_BOTTOM 175
#define TK_STDDEV 176
#define TK_PERCENTILE 177
#define TK_APERCENTILE 178
#define TK_LEASTSQUARES 179
#define TK_HISTOGRAM 180
#define TK_DIFF 181
#define TK_SPREAD 182
#define TK_TWA 183
#define TK_INTERP 184
#define TK_LAST_ROW 185
#define TK_SEMI 186
#define TK_NONE 187
#define TK_PREV 188
#define TK_LINEAR 189
#define TK_IMPORT 190
#define TK_METRIC 191
#define TK_TBNAME 192
#define TK_JOIN 193
#define TK_METRICS 194
#define TK_STABLE 195
#define TK_UNION 104
#define TK_ALL 105
#define TK_FROM 106
#define TK_VARIABLE 107
#define TK_INTERVAL 108
#define TK_FILL 109
#define TK_SLIDING 110
#define TK_ORDER 111
#define TK_BY 112
#define TK_ASC 113
#define TK_DESC 114
#define TK_GROUP 115
#define TK_HAVING 116
#define TK_LIMIT 117
#define TK_OFFSET 118
#define TK_SLIMIT 119
#define TK_SOFFSET 120
#define TK_WHERE 121
#define TK_NOW 122
#define TK_RESET 123
#define TK_QUERY 124
#define TK_ADD 125
#define TK_COLUMN 126
#define TK_TAG 127
#define TK_CHANGE 128
#define TK_SET 129
#define TK_KILL 130
#define TK_CONNECTION 131
#define TK_COLON 132
#define TK_STREAM 133
#define TK_ABORT 134
#define TK_AFTER 135
#define TK_ATTACH 136
#define TK_BEFORE 137
#define TK_BEGIN 138
#define TK_CASCADE 139
#define TK_CLUSTER 140
#define TK_CONFLICT 141
#define TK_COPY 142
#define TK_DEFERRED 143
#define TK_DELIMITERS 144
#define TK_DETACH 145
#define TK_EACH 146
#define TK_END 147
#define TK_EXPLAIN 148
#define TK_FAIL 149
#define TK_FOR 150
#define TK_IGNORE 151
#define TK_IMMEDIATE 152
#define TK_INITIALLY 153
#define TK_INSTEAD 154
#define TK_MATCH 155
#define TK_KEY 156
#define TK_OF 157
#define TK_RAISE 158
#define TK_REPLACE 159
#define TK_RESTRICT 160
#define TK_ROW 161
#define TK_STATEMENT 162
#define TK_TRIGGER 163
#define TK_VIEW 164
#define TK_COUNT 165
#define TK_SUM 166
#define TK_AVG 167
#define TK_MIN 168
#define TK_MAX 169
#define TK_FIRST 170
#define TK_LAST 171
#define TK_TOP 172
#define TK_BOTTOM 173
#define TK_STDDEV 174
#define TK_PERCENTILE 175
#define TK_APERCENTILE 176
#define TK_LEASTSQUARES 177
#define TK_HISTOGRAM 178
#define TK_DIFF 179
#define TK_SPREAD 180
#define TK_TWA 181
#define TK_INTERP 182
#define TK_LAST_ROW 183
#define TK_RATE 184
#define TK_IRATE 185
#define TK_SUM_RATE 186
#define TK_SUM_IRATE 187
#define TK_AVG_RATE 188
#define TK_AVG_IRATE 189
#define TK_SEMI 190
#define TK_NONE 191
#define TK_PREV 192
#define TK_LINEAR 193
#define TK_IMPORT 194
#define TK_METRIC 195
#define TK_TBNAME 196
#define TK_JOIN 197
#define TK_METRICS 198
#define TK_STABLE 199
#define TK_INSERT 200
#define TK_INTO 201
#define TK_VALUES 202
#endif

View File

@ -162,8 +162,8 @@ typedef struct SExtTagsInfo {
// sql function runtime context
typedef struct SQLFunctionCtx {
int32_t startOffset;
int32_t size;
int32_t order;
int32_t size; // number of rows
int32_t order; // asc|desc
int32_t scanFlag; // TODO merge with currentStage
int16_t inputType;

View File

@ -73,7 +73,7 @@ void tVariantCreateFromBinary(tVariant *pVar, char *pz, uint32_t len, uint32_t t
void tVariantDestroy(tVariant *pV);
void tVariantAssign(tVariant *pDst, tVariant *pSrc);
void tVariantAssign(tVariant *pDst, const tVariant *pSrc);
int32_t tVariantToString(tVariant *pVar, char *dst);

View File

@ -102,8 +102,8 @@ extern "C" {
#define GET_FLOAT_VAL(x) taos_align_get_float(x)
#define GET_DOUBLE_VAL(x) taos_align_get_double(x)
float taos_align_get_float(char* pBuf);
double taos_align_get_double(char* pBuf);
float taos_align_get_float(const char* pBuf);
double taos_align_get_double(const char* pBuf);
//#define __float_align_declear() float __underlyFloat = 0.0;
//#define __float_align_declear()
@ -162,15 +162,6 @@ int32_t taosFileRename(char *fullPath, char *suffix, char delimiter, char **dstP
int32_t taosInitTimer(void (*callback)(int), int32_t ms);
/**
* murmur hash algorithm
* @key usually string
* @len key length
* @seed hash seed
* @out an int32 value
*/
uint32_t MurmurHash3_32(const void *key, int32_t len);
bool taosMbsToUcs4(char *mbs, int32_t mbs_len, char *ucs4, int32_t ucs4_max_len);
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);

View File

@ -734,7 +734,7 @@ int isCommentLine(char *line) {
void source_file(TAOS *con, char *fptr) {
wordexp_t full_path;
int read_len = 0;
char * cmd = malloc(MAX_COMMAND_SIZE);
char * cmd = calloc(1, MAX_COMMAND_SIZE);
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;

View File

@ -231,7 +231,7 @@ char *tsError[] = {"success",
"batch size too big",
"timestamp out of range", //105
"invalid query message",
"timestamp disordered in cache block",
"too many results from vnodes for sort",
"timestamp disordered in file block",
"invalid commit log",
"no disk space on server", //110

View File

@ -239,10 +239,19 @@ typedef struct SQuery {
int lfd; // only for query in file, last file handle
SCompBlock *pBlock; // only for query in file
SField ** pFields;
int numOfBlocks; // only for query in file
int blockBufferSize; // length of pBlock buffer
int currentSlot;
int firstSlot;
/*
* The following two fields are used to handle the data-missing situation caused by import operations,
* i.e., when the commit slot is the first slot and commitPoint != 0.
*/
int32_t commitSlot; // which slot is committed,
int32_t commitPoint; // starting point for next commit
int slot;
int pos;
TSKEY key;
@ -251,6 +260,7 @@ typedef struct SQuery {
TSKEY skey;
TSKEY ekey;
int64_t nAggTimeInterval;
int64_t slidingTime; // sliding time for sliding window query
char intervalTimeUnit; // interval time unit, used for daytime revision
int8_t precision;
int16_t numOfOutputCols;

View File

@ -22,7 +22,8 @@ extern "C" {
#include "os.h"
#include "ihash.h"
#include "hash.h"
#include "hashutil.h"
#define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query))
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
@ -63,7 +64,7 @@ typedef enum {
* the next query.
*
* this status is only exist in group-by clause and
* diff/add/division/mulitply/ query.
* diff/add/division/multiply/ query.
*/
QUERY_RESBUF_FULL = 0x2,
@ -117,11 +118,9 @@ typedef enum {
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
typedef int (*__block_search_fn_t)(char* data, int num, int64_t key, int order);
typedef int32_t (*__read_data_fn_t)(int fd, SQInfo* pQInfo, SQueryFilesInfo* pQueryFile, char* buf, uint64_t offset,
int32_t size);
static FORCE_INLINE SMeterObj* getMeterObj(void* hashHandle, int32_t sid) {
return *(SMeterObj**)taosGetIntHashData(hashHandle, sid);
return *(SMeterObj**)taosGetDataFromHash(hashHandle, (const char*) &sid, sizeof(sid));
}
bool isQueryKilled(SQuery* pQuery);
@ -130,6 +129,7 @@ bool isPointInterpoQuery(SQuery* pQuery);
bool isTopBottomQuery(SQuery* pQuery);
bool isFirstLastRowQuery(SQuery* pQuery);
bool isTSCompQuery(SQuery* pQuery);
bool notHasQueryTimeRange(SQuery *pQuery);
bool needSupplementaryScan(SQuery* pQuery);
bool onDemandLoadDatablock(SQuery* pQuery, int16_t queryRangeSet);
@ -149,7 +149,6 @@ void vnodeScanAllData(SQueryRuntimeEnv* pRuntimeEnv);
int32_t vnodeQueryResultInterpolate(SQInfo* pQInfo, tFilePage** pDst, tFilePage** pDataSrc, int32_t numOfRows,
int32_t* numOfInterpo);
void copyResToQueryResultBuf(SMeterQuerySupportObj* pSupporter, SQuery* pQuery);
void moveDescOrderResultsToFront(SQueryRuntimeEnv* pRuntimeEnv);
void doSkipResults(SQueryRuntimeEnv* pRuntimeEnv);
void doFinalizeResult(SQueryRuntimeEnv* pRuntimeEnv);
@ -159,7 +158,7 @@ void forwardIntervalQueryRange(SMeterQuerySupportObj* pSupporter, SQueryRuntimeE
void forwardQueryStartPosition(SQueryRuntimeEnv* pRuntimeEnv);
bool normalizedFirstQueryRange(bool dataInDisk, bool dataInCache, SMeterQuerySupportObj* pSupporter,
SPointInterpoSupporter* pPointInterpSupporter);
SPointInterpoSupporter* pPointInterpSupporter, int64_t* key);
void pointInterpSupporterInit(SQuery* pQuery, SPointInterpoSupporter* pInterpoSupport);
void pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport);
@ -173,10 +172,10 @@ void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order);
int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj* pSupporter);
void copyFromGroupBuf(SQInfo* pQInfo, SOutputRes* result);
SBlockInfo getBlockBasicInfo(void* pBlock, int32_t blockType);
SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQuery* pQuery, int32_t slot);
SBlockInfo getBlockBasicInfo(SQueryRuntimeEnv* pRuntimeEnv, void* pBlock, int32_t blockType);
SCacheBlock* getCacheDataBlock(SMeterObj* pMeterObj, SQueryRuntimeEnv* pRuntimeEnv, int32_t slot);
void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus, char* data,
void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32_t blockStatus,
SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields,
__block_search_fn_t searchFn);
@ -278,6 +277,13 @@ void displayInterResult(SData** pdata, SQuery* pQuery, int32_t numOfRows);
void vnodePrintQueryStatistics(SMeterQuerySupportObj* pSupporter);
void clearGroupResultBuf(SOutputRes* pOneOutputRes, int32_t nOutputCols);
void copyGroupResultBuf(SOutputRes* dst, const SOutputRes* src, int32_t nOutputCols);
void resetSlidingWindowInfo(SSlidingWindowInfo* pSlidingWindowInfo, int32_t numOfCols);
void clearCompletedSlidingWindows(SSlidingWindowInfo* pSlidingWindowInfo, int32_t numOfCols);
int32_t numOfClosedSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo);
void closeSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo, int32_t slot);
void closeAllSlidingWindow(SSlidingWindowInfo* pSlidingWindowInfo);
#ifdef __cplusplus
}

View File

@ -35,19 +35,19 @@ typedef struct {
int32_t fileId;
} SPositionInfo;
typedef struct SQueryLoadBlockInfo {
typedef struct SLoadDataBlockInfo {
int32_t fileListIndex; /* index of this file in files list of this vnode */
int32_t fileId;
int32_t slotIdx;
int32_t sid;
bool tsLoaded; // if timestamp column of current block is loaded or not
} SQueryLoadBlockInfo;
} SLoadDataBlockInfo;
typedef struct SQueryLoadCompBlockInfo {
typedef struct SLoadCompBlockInfo {
int32_t sid; /* meter sid */
int32_t fileId;
int32_t fileListIndex;
} SQueryLoadCompBlockInfo;
} SLoadCompBlockInfo;
/*
* the header file info for one vnode
@ -112,7 +112,31 @@ typedef struct SQueryFilesInfo {
char dbFilePathPrefix[PATH_MAX];
} SQueryFilesInfo;
typedef struct RuntimeEnvironment {
typedef struct STimeWindow {
TSKEY skey;
TSKEY ekey;
} STimeWindow;
typedef struct SWindowStatus {
STimeWindow window;
bool closed;
} SWindowStatus;
typedef struct SSlidingWindowInfo {
SOutputRes* pResult; // reference to SQuerySupporter->pResult
SWindowStatus* pStatus; // current query window closed or not?
void* hashList; // hash list for quick access
int16_t type; // data type for hash key
int32_t capacity; // max capacity
int32_t curIndex; // current start active index
int32_t size;
int64_t startTime; // start time of the first time window for sliding query
int64_t prevSKey; // previous (not completed) sliding window start key
int64_t threshold; // threshold for returning completed results.
} SSlidingWindowInfo;
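
Putting this structure together with the sliding-window routines declared earlier (resetSlidingWindowInfo, clearCompletedSlidingWindows, closeSlidingWindow, closeAllSlidingWindow, numOfClosedSlidingWindow), a hypothetical lifecycle sketch follows. Initialization of the structure itself is not shown in these headers and is assumed to happen when the runtime environment is set up; the ordering below is only an illustration of how the calls fit together.

// illustrative only: a possible ordering of the window-management calls
static void slidingWindowLifecycleSketch(SSlidingWindowInfo *pWindowResInfo, int32_t numOfOutputCols) {
  // before a new scan pass, drop windows whose results were already returned
  clearCompletedSlidingWindows(pWindowResInfo, numOfOutputCols);

  // ... scan data blocks, accumulating per-window results in pResult ...

  // once the scan has moved past a window's end key, mark that slot as closed
  closeSlidingWindow(pWindowResInfo, pWindowResInfo->curIndex);

  // at the end of the query range, every remaining window can be closed
  closeAllSlidingWindow(pWindowResInfo);

  if (numOfClosedSlidingWindow(pWindowResInfo) >= pWindowResInfo->threshold) {
    // enough completed windows to ship a batch of results to the client
  }

  // reuse the structure for the next table or group
  resetSlidingWindowInfo(pWindowResInfo, numOfOutputCols);
}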
typedef struct SQueryRuntimeEnv {
SPositionInfo startPos; /* the start position, used for secondary/third iteration */
SPositionInfo endPos; /* the last access position in query, served as the start pos of reversed order query */
SPositionInfo nextPos; /* start position of the next scan */
@ -126,20 +150,30 @@ typedef struct RuntimeEnvironment {
SQuery* pQuery;
SMeterObj* pMeterObj;
SQLFunctionCtx* pCtx;
SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */
SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */
SQueryFilesInfo vnodeFileInfo;
int16_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
int16_t scanFlag; // denotes reversed scan of data or not
SInterpolationInfo interpoInfo;
SData** pInterpoBuf;
SOutputRes* pResult; // reference to SQuerySupporter->pResult
void* hashList;
int32_t usedIndex; // assigned SOutputRes in list
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostSummary summary;
SLoadDataBlockInfo loadBlockInfo; /* record current block load information */
SLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */
SQueryFilesInfo vnodeFileInfo;
int16_t numOfRowsPerPage;
int16_t offset[TSDB_MAX_COLUMNS];
int16_t scanFlag; // denotes reversed scan of data or not
SInterpolationInfo interpoInfo;
SData** pInterpoBuf;
SSlidingWindowInfo swindowResInfo;
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostSummary summary;
STimeWindow intervalWindow; // the complete time window, not affected by the actual data distribution
/*
* Temporarily hold the in-memory cache block info during scan cache blocks
* Here we do not use the cache block info from pMeterObj, simply because it may change at any time
* during the query due to the submit/insert handling threads.
* So we keep a copy of the support structure as well as the cache block data itself.
*/
SCacheBlock cacheBlock;
} SQueryRuntimeEnv;
/* intermediate result during multimeter query involves interval */
@ -172,7 +206,7 @@ typedef struct SMeterDataInfo {
} SMeterDataInfo;
typedef struct SMeterQuerySupportObj {
void* pMeterObj;
void* pMetersHashTable; // meter table hash list
SMeterSidExtInfo** pMeterSidExtInfo;
int32_t numOfMeters;
@ -229,7 +263,6 @@ typedef struct _qinfo {
int killed;
struct _qinfo *prev, *next;
SQuery query;
int num;
int totalPoints;
int pointsRead;
int pointsReturned;
@ -262,7 +295,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo* pQInfo, SQuery* pQuery, void* param)
void vnodeDecMeterRefcnt(SQInfo* pQInfo);
/* sql query handle in dnode */
void vnodeSingleMeterQuery(SSchedMsg* pMsg);
void vnodeSingleTableQuery(SSchedMsg* pMsg);
/*
* handle multi-meter query process

View File

@ -900,7 +900,7 @@ int mgmtProcessShowMsg(char *pMsg, int msgLen, SConnObj *pConn) {
SShowRspMsg *pShowRsp;
SShowObj * pShow = NULL;
if (pShowMsg->type == TSDB_MGMT_TABLE_PNODE || TSDB_MGMT_TABLE_GRANTS || TSDB_MGMT_TABLE_SCORES) {
if (pShowMsg->type == TSDB_MGMT_TABLE_DNODE || TSDB_MGMT_TABLE_GRANTS || TSDB_MGMT_TABLE_SCORES) {
if (mgmtCheckRedirectMsg(pConn, TSDB_MSG_TYPE_SHOW_RSP) != 0) {
return 0;
}
@ -1487,8 +1487,8 @@ void mgmtInitProcessShellMsg() {
mgmtProcessShellMsg[TSDB_MSG_TYPE_SHOW] = mgmtProcessShowMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_CONNECT] = mgmtProcessConnectMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_HEARTBEAT] = mgmtProcessHeartBeatMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_PNODE] = mgmtProcessCreateDnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_PNODE] = mgmtProcessDropDnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_DNODE] = mgmtProcessCreateDnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_DNODE] = mgmtProcessDropDnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_CREATE_MNODE] = mgmtProcessCreateMnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_DROP_MNODE] = mgmtProcessDropMnodeMsg;
mgmtProcessShellMsg[TSDB_MSG_TYPE_CFG_MNODE] = mgmtProcessCfgMnodeMsg;

View File

@ -630,7 +630,14 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) {
setNullN(pData, type, bytes, pCacheBlock->numOfPoints);
} else {
pRead = pCacheBlock->offset[colIdx] + startPos * bytes;
memcpy(pData, pRead, numOfReads * bytes);
if (QUERY_IS_ASC_QUERY(pQuery)) {
memcpy(pData, pRead, numOfReads * bytes);
} else {
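// reversed copy into the output buffer for descending queries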
for(int32_t j = 0; j < numOfReads; ++j) {
memcpy(pData + bytes * j, pRead + (numOfReads - 1 - j) * bytes, bytes);
}
}
}
}
numOfQualifiedPoints = numOfReads;
@ -668,8 +675,7 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) {
}
ids[numOfQualifiedPoints] = j;
if (++numOfQualifiedPoints == numOfReads) {
// qualified data are enough
if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough
break;
}
}
@ -691,23 +697,22 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) {
if (!vnodeFilterData(pQuery, &numOfActualRead, j)) {
continue;
}
ids[numOfReads - numOfQualifiedPoints - 1] = j;
if (++numOfQualifiedPoints == numOfReads) {
// qualified data are enough
ids[numOfQualifiedPoints] = j;
if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough
break;
}
}
}
int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints;
// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints;
for (int32_t j = 0; j < numOfQualifiedPoints; ++j) {
for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) {
int16_t colIndex = pQuery->pSelectExpr[col].pBase.colInfo.colIdx;
int32_t bytes = pObj->schema[colIndex].bytes;
pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes;
pRead = pCacheBlock->offset[colIndex] + ids[j + start] * bytes;
pRead = pCacheBlock->offset[colIndex] + ids[j/* + start*/] * bytes;
memcpy(pData, pRead, bytes);
}

View File

@ -1640,11 +1640,15 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) {
pData = pQuery->sdata[i]->data + pQuery->pointsOffset * bytes;
pRead = sdata[colBufferIndex]->data + startPos * bytes;
memcpy(pData, pRead, numOfReads * bytes);
if (QUERY_IS_ASC_QUERY(pQuery)) {
memcpy(pData, pRead, numOfReads * bytes);
} else { //reversed copy to output buffer
for(int32_t j = 0; j < numOfReads; ++j) {
memcpy(pData + bytes * j, pRead + (numOfReads - 1 - j) * bytes, bytes);
}
}
}
numOfQualifiedPoints = numOfReads;
} else {
// check each data one by one set the input column data
for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) {
@ -1675,8 +1679,7 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) {
}
ids[numOfQualifiedPoints] = j;
if (++numOfQualifiedPoints == numOfReads) {
// qualified data are enough
if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough
break;
}
}
@ -1698,22 +1701,21 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) {
if (!vnodeFilterData(pQuery, &numOfActualRead, j)) {
continue;
}
ids[numOfReads - numOfQualifiedPoints - 1] = j;
if (++numOfQualifiedPoints == numOfReads) {
// qualified data are enough
ids[numOfQualifiedPoints] = j;
if (++numOfQualifiedPoints == numOfReads) { // qualified data are enough
break;
}
}
}
int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints;
// int32_t start = QUERY_IS_ASC_QUERY(pQuery) ? 0 : numOfReads - numOfQualifiedPoints;
for (int32_t j = 0; j < numOfQualifiedPoints; ++j) {
for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) {
int16_t colIndexInBuffer = pQuery->pSelectExpr[col].pBase.colInfo.colIdxInBuf;
int32_t bytes = GET_COLUMN_BYTES(pQuery, col);
pData = pQuery->sdata[col]->data + (pQuery->pointsOffset + j) * bytes;
pRead = sdata[colIndexInBuffer]->data + ids[j + start] * bytes;
pRead = sdata[colIndexInBuffer]->data + ids[j/* + start*/] * bytes;
memcpy(pData, pRead, bytes);
}

View File

@ -605,6 +605,11 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi
vnodeSendMeterCfgMsg(pObj->vnode, pObj->sid);
code = TSDB_CODE_ACTION_IN_PROGRESS;
return code;
} else if (pObj->sversion > sversion) {
dTrace("vid:%d sid:%d id:%s, client schema out of date, sql is invalid. client sversion:%d vnode sversion:%d",
pObj->vnode, pObj->sid, pObj->meterId, pObj->sversion, sversion);
code = TSDB_CODE_INVALID_SQL;
return code;
}
pData = pSubmit->payLoad;

File diff suppressed because it is too large

View File

@ -85,6 +85,17 @@ static void setStartPositionForCacheBlock(SQuery *pQuery, SCacheBlock *pBlock, b
}
}
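// clear the per-column 'complete' flag so output generation is re-enabled for the next table (projection query path)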
static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery* pQuery = pRuntimeEnv->pQuery;
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[i]);
if (pResInfo != NULL) {
pResInfo->complete = false;
}
}
}
static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
SQuery * pQuery = &pQInfo->query;
SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
@ -92,7 +103,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo;
SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid);
SMeterObj *pTempMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid);
assert(pTempMeterObj != NULL);
__block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeterObj->searchAlgorithm];
@ -111,7 +122,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
}
for (int32_t k = start; k <= end; ++k) {
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid);
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid);
if (pMeterObj == NULL) {
dError("QInfo:%p failed to find meterId:%d, continue", pQInfo, pMeterSidExtInfo[k]->sid);
continue;
@ -147,7 +158,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
(pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
dTrace(
"QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%" PRId64 "-%" PRId64 ", "
"QInfo:%p vid:%d sid:%d id:%s, query completed, ignore data in cache. qrange:%" PRId64 "-%" PRId64 ", "
"lastKey:%" PRId64,
pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey,
pQuery->lastKey);
@ -183,7 +194,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
// data in this block may be flushed to disk and this block is allocated to other meter
// todo try with remain cache blocks
SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
SCacheBlock *pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
if (pBlock == NULL) {
continue;
}
@ -196,16 +207,13 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache;
for (int32_t i = 0; i < pCacheInfo->maxBlocks; ++i) {
pBlock = getCacheDataBlock(pMeterObj, pQuery, pQuery->slot);
pBlock = getCacheDataBlock(pMeterObj, pRuntimeEnv, pQuery->slot);
/*
* 1. pBlock == NULL. The cache block may have been flushed to disk, so it is not available; skip it and try the next one.
*
* 2. pBlock->numOfPoints == 0. This is an empty block, caused by the allocate-and-write-into-cache
* procedure: the block has been allocated but no data has been put into it yet. If it is the last
* (newly allocated) block, abort the query; otherwise, skip it and go on.
* The check for empty blocks has been refactored into the getCacheDataBlock function.
*/
if ((pBlock == NULL) || (pBlock->numOfPoints == 0)) {
if (pBlock == NULL) {
if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) {
break;
}
@ -216,8 +224,8 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
setStartPositionForCacheBlock(pQuery, pBlock, &firstCheckSlot);
TSKEY *primaryKeys = (TSKEY *)pBlock->offset[0];
TSKEY* primaryKeys = (TSKEY*) pRuntimeEnv->primaryColBuffer->data;
// in handling file data block, the timestamp range validation is done during fetching candidate file blocks
if ((primaryKeys[pQuery->pos] > pSupporter->rawEKey && QUERY_IS_ASC_QUERY(pQuery)) ||
(primaryKeys[pQuery->pos] < pSupporter->rawEKey && !QUERY_IS_ASC_QUERY(pQuery))) {
@ -226,15 +234,14 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) {
// only record the key on last block
SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus);
SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK);
SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_CACHE_BLOCK);
dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", fileId:%d, slot:%d, pos:%d, bstatus:%d",
GET_QINFO_ADDR(pQuery), binfo.keyFirst, binfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos,
pRuntimeEnv->blockStatus);
totalBlocks++;
queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pBlock, &binfo, &pMeterInfo[k], NULL,
searchFn);
queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, &pMeterInfo[k], NULL, searchFn);
if (ALL_CACHE_BLOCKS_CHECKED(pQuery)) {
break;
@ -264,7 +271,7 @@ static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo
SMeterDataBlockInfoEx *pDataBlockInfoEx = NULL;
int32_t nAllocBlocksInfoSize = 0;
SMeterObj * pTempMeter = getMeterObj(pSupporter->pMeterObj, pSupporter->pMeterSidExtInfo[0]->sid);
SMeterObj * pTempMeter = getMeterObj(pSupporter->pMetersHashTable, pSupporter->pMeterSidExtInfo[0]->sid);
__block_search_fn_t searchFn = vnodeSearchKeyFunc[pTempMeter->searchAlgorithm];
int32_t vnodeId = pTempMeter->vnode;
@ -423,7 +430,7 @@ static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo
continue;
}
SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_FILE_BLOCK);
SBlockInfo binfo = getBlockBasicInfo(pRuntimeEnv, pBlock, BLK_FILE_BLOCK);
assert(pQuery->pos >= 0 && pQuery->pos < pBlock->numOfPoints);
TSKEY *primaryKeys = (TSKEY *)pRuntimeEnv->primaryColBuffer->data;
@ -439,8 +446,8 @@ static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo
(pBlock->keyFirst >= pQuery->ekey && pBlock->keyLast <= pQuery->lastKey && !QUERY_IS_ASC_QUERY(pQuery)));
}
queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, (char *)pRuntimeEnv->colDataBuffer, &binfo,
pOneMeterDataInfo, pInfoEx->pBlock.fields, searchFn);
queryOnBlock(pSupporter, primaryKeys, pRuntimeEnv->blockStatus, &binfo, pOneMeterDataInfo, pInfoEx->pBlock.fields,
searchFn);
}
tfree(pReqMeterDataInfo);
@ -471,7 +478,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[index]->sid);
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[index]->sid);
if (pMeterObj == NULL) {
dError("QInfo:%p do not find required meter id: %d, all meterObjs id is:", pQInfo, pMeterSidExtInfo[index]->sid);
return false;
@ -485,6 +492,9 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
pQInfo->pObj = pMeterObj;
pQuery->lastKey = pQuery->skey;
pRuntimeEnv->pMeterObj = pMeterObj;
vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj);
vnodeUpdateFilterColumnIndex(pQuery);
vnodeCheckIfDataExists(pRuntimeEnv, pMeterObj, dataInDisk, dataInCache);
@ -509,6 +519,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool *
}
}
initCtxOutputBuf(pRuntimeEnv);
return true;
}
@ -532,7 +543,7 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start
SPointInterpoSupporter pointInterpSupporter = {0};
pointInterpSupporterInit(pQuery, &pointInterpSupporter);
if (!normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter)) {
if (!normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter, NULL)) {
pointInterpSupporterDestroy(&pointInterpSupporter);
return 0;
}
@ -545,11 +556,9 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start
pointInterpSupporterDestroy(&pointInterpSupporter);
vnodeScanAllData(pRuntimeEnv);
// first/last_row query, do not invoke the finalize for super table query
if (!isFirstLastRowQuery(pQuery)) {
doFinalizeResult(pRuntimeEnv);
}
doFinalizeResult(pRuntimeEnv);
int64_t numOfRes = getNumOfResult(pRuntimeEnv);
assert(numOfRes == 1 || numOfRes == 0);
@ -563,7 +572,14 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start
return numOfRes;
}
static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
/**
* super table query handler
* 1. super table projection query, group-by on normal columns query, ts-comp query
* 2. point interpolation query, last row query
*
* @param pQInfo
*/
static void vnodeSTableSeqProcessor(SQInfo *pQInfo) {
SMeterQuerySupportObj *pSupporter = pQInfo->pMeterQuerySupporter;
SMeterSidExtInfo **pMeterSidExtInfo = pSupporter->pMeterSidExtInfo;
@ -572,7 +588,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
SQuery * pQuery = &pQInfo->query;
tSidSet *pSids = pSupporter->pSidSet;
SMeterObj *pOneMeter = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[0]->sid);
int32_t vid = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[0]->sid)->vnode;
if (isPointInterpoQuery(pQuery)) {
resetCtxOutputBuf(pRuntimeEnv);
@ -584,7 +600,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
int32_t end = pSids->starterPos[pSupporter->subgroupIdx + 1] - 1;
if (isFirstLastRowQuery(pQuery)) {
dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode,
dTrace("QInfo:%p last_row query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid,
pSids->numOfSubSet, pSupporter->subgroupIdx);
TSKEY key = -1;
@ -600,7 +616,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
// get the last key of meters that belongs to this group
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMeterObj, pMeterSidExtInfo[k]->sid);
SMeterObj *pMeterObj = getMeterObj(pSupporter->pMetersHashTable, pMeterSidExtInfo[k]->sid);
if (pMeterObj != NULL) {
if (key < pMeterObj->lastKey) {
key = pMeterObj->lastKey;
@ -617,7 +633,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
int64_t num = doCheckMetersInGroup(pQInfo, index, start);
assert(num >= 0);
} else {
dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, pOneMeter->vnode,
dTrace("QInfo:%p interp query on vid:%d, numOfGroups:%d, current group:%d", pQInfo, vid,
pSids->numOfSubSet, pSupporter->subgroupIdx);
for (int32_t k = start; k <= end; ++k) {
@ -644,7 +660,9 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
}
} else {
// this procedure treats all tables as single group
/*
* 1. super table projection query, 2. group-by on normal columns query, 3. ts-comp query
*/
assert(pSupporter->meterIdx >= 0);
/*
@ -665,18 +683,8 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
resetCtxOutputBuf(pRuntimeEnv);
for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
SOutputRes *pOneRes = &pRuntimeEnv->pResult[i];
clearGroupResultBuf(pOneRes, pQuery->numOfOutputCols);
}
pRuntimeEnv->usedIndex = 0;
taosCleanUpIntHash(pRuntimeEnv->hashList);
int32_t primeHashSlot = 10039;
pRuntimeEnv->hashList = taosInitIntHash(primeHashSlot, POINTER_BYTES, taosHashInt);
resetSlidingWindowInfo(&pRuntimeEnv->swindowResInfo, pQuery->numOfOutputCols);
while (pSupporter->meterIdx < pSupporter->numOfMeters) {
int32_t k = pSupporter->meterIdx;
@ -684,6 +692,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK);
return;
}
TSKEY skey = pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key;
if (skey > 0) {
@ -707,7 +716,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
#endif
SPointInterpoSupporter pointInterpSupporter = {0};
if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter) == false) {
if (normalizedFirstQueryRange(dataInDisk, dataInCache, pSupporter, &pointInterpSupporter, NULL) == false) {
pQuery->skey = pSupporter->rawSKey;
pQuery->ekey = pSupporter->rawEKey;
@ -728,9 +737,6 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
}
vnodeUpdateQueryColumnIndex(pQuery, pRuntimeEnv->pMeterObj);
vnodeUpdateFilterColumnIndex(pQuery);
vnodeScanAllData(pRuntimeEnv);
pQuery->pointsRead = getNumOfResult(pRuntimeEnv);
@ -741,7 +747,10 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
pSupporter->meterIdx = pSupporter->pSidSet->numOfSids;
break;
}
// enable execution for next table, when handling the projection query
enableExecutionForNextTable(pRuntimeEnv);
if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) {
/*
* query range is identical in terms of all meters involved in query,
@ -761,8 +770,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
break;
}
} else {
// forward query range
} else { // forward query range
pQuery->skey = pQuery->lastKey;
// all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter
@ -779,7 +787,18 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
}
if (!isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFirstLastRowQuery(pQuery)) {
/*
* 1. super table projection query, group-by on normal columns query, ts-comp query
* 2. point interpolation query, last row query
*
* group-by on normal columns query and last_row query do NOT invoke the finalizer here,
* since the finalize stage will be done at the client side.
*
* projection query, point interpolation query do not need the finalizer.
*
* Only the ts-comp query requires the finalizer function to be executed here.
*/
if (isTSCompQuery(pQuery)) {
doFinalizeResult(pRuntimeEnv);
}
@ -787,9 +806,14 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur;
}
// todo refactor
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
for (int32_t i = 0; i < pRuntimeEnv->usedIndex; ++i) {
SOutputRes *buf = &pRuntimeEnv->pResult[i];
SSlidingWindowInfo* pSlidingWindowInfo = &pRuntimeEnv->swindowResInfo;
for (int32_t i = 0; i < pSlidingWindowInfo->size; ++i) {
SOutputRes *buf = &pSlidingWindowInfo->pResult[i];
pSlidingWindowInfo->pStatus[i].closed = true; // enable return all results for group by normal columns
for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
buf->numOfRows = MAX(buf->numOfRows, buf->resultInfo[j].numOfRes);
}
@ -797,18 +821,16 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
pQInfo->pMeterQuerySupporter->subgroupIdx = 0;
pQuery->pointsRead = 0;
copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult);
copyFromGroupBuf(pQInfo, pSlidingWindowInfo->pResult);
}
pQInfo->pointsRead += pQuery->pointsRead;
pQuery->pointsOffset = pQuery->pointsToRead;
moveDescOrderResultsToFront(pRuntimeEnv);
dTrace(
"QInfo %p vid:%d, numOfMeters:%d, index:%d, numOfGroups:%d, %d points returned, totalRead:%d totalReturn:%d,"
"next skey:%" PRId64 ", offset:%" PRId64,
pQInfo, pOneMeter->vnode, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead,
pQInfo, vid, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead,
pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->skey, pQuery->limit.offset);
}
@ -966,7 +988,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) {
* select count(*)/top(field,k)/avg(field name) from table_name [where ts>now-1a];
* select count(*) from table_name group by status_column;
*/
static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) {
static void vnodeSingleTableFixedOutputProcessor(SQInfo *pQInfo) {
SQuery * pQuery = &pQInfo->query;
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->pMeterQuerySupporter->runtimeEnv;
@ -989,20 +1011,13 @@ static void vnodeSingleMeterFixedOutputProcessor(SQInfo *pQInfo) {
assert(isTopBottomQuery(pQuery));
}
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
pQInfo->pMeterQuerySupporter->subgroupIdx = 0;
pQuery->pointsRead = 0;
copyFromGroupBuf(pQInfo, pRuntimeEnv->pResult);
}
doSkipResults(pRuntimeEnv);
doRevisedResultsByLimit(pQInfo);
moveDescOrderResultsToFront(pRuntimeEnv);
pQInfo->pointsRead = pQuery->pointsRead;
}
static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) {
static void vnodeSingleTableMultiOutputProcessor(SQInfo *pQInfo) {
SQuery * pQuery = &pQInfo->query;
SMeterObj *pMeterObj = pQInfo->pObj;
@ -1044,8 +1059,6 @@ static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) {
}
doRevisedResultsByLimit(pQInfo);
moveDescOrderResultsToFront(pRuntimeEnv);
pQInfo->pointsRead += pQuery->pointsRead;
if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)) {
@ -1073,7 +1086,8 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter
(pQuery->skey >= pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery)));
initCtxOutputBuf(pRuntimeEnv);
clearCompletedSlidingWindows(&pRuntimeEnv->swindowResInfo, pQuery->numOfOutputCols);
vnodeScanAllData(pRuntimeEnv);
if (isQueryKilled(pQuery)) {
return;
@ -1104,7 +1118,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter
}
forwardIntervalQueryRange(pSupporter, pRuntimeEnv);
if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED)) {
if (Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED|QUERY_RESBUF_FULL)) {
break;
}
@ -1122,7 +1136,7 @@ static void vnodeSingleMeterIntervalMainLooper(SMeterQuerySupportObj *pSupporter
}
/* handle time interval query on single table */
static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) {
static void vnodeSingleTableIntervalProcessor(SQInfo *pQInfo) {
SQuery * pQuery = &(pQInfo->query);
SMeterObj *pMeterObj = pQInfo->pObj;
@ -1143,17 +1157,8 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) {
taosInterpoSetStartInfo(&pRuntimeEnv->interpoInfo, pQuery->pointsRead, pQuery->interpoType);
SData **pInterpoBuf = pRuntimeEnv->pInterpoBuf;
if (QUERY_IS_ASC_QUERY(pQuery)) {
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes);
}
} else {
int32_t size = pMeterObj->pointsPerFileBlock;
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
memcpy(pInterpoBuf[i]->data,
pQuery->sdata[i]->data + (size - pQuery->pointsRead) * pQuery->pSelectExpr[i].resBytes,
pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes);
}
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
memcpy(pInterpoBuf[i]->data, pQuery->sdata[i]->data, pQuery->pointsRead * pQuery->pSelectExpr[i].resBytes);
}
numOfInterpo = 0;
@ -1170,18 +1175,22 @@ static void vnodeSingleMeterIntervalProcessor(SQInfo *pQInfo) {
pQuery->pointsRead = 0;
}
}
if (isGroupbyNormalCol(pQuery->pGroupbyExpr) || (pQuery->slidingTime > 0 && pQuery->nAggTimeInterval > 0)) {
pQInfo->pMeterQuerySupporter->subgroupIdx = 0;
pQuery->pointsRead = 0;
copyFromGroupBuf(pQInfo, pRuntimeEnv->swindowResInfo.pResult);
}
pQInfo->pointsRead += pQuery->pointsRead;
pQInfo->pointsInterpo += numOfInterpo;
moveDescOrderResultsToFront(pRuntimeEnv);
dTrace("%p vid:%d sid:%d id:%s, %d points returned %d points interpo, totalRead:%d totalInterpo:%d totalReturn:%d",
pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead, numOfInterpo,
pQInfo->pointsRead - pQInfo->pointsInterpo, pQInfo->pointsInterpo, pQInfo->pointsReturned);
}
void vnodeSingleMeterQuery(SSchedMsg *pMsg) {
void vnodeSingleTableQuery(SSchedMsg *pMsg) {
SQInfo *pQInfo = (SQInfo *)pMsg->ahandle;
if (pQInfo == NULL || pQInfo->pMeterQuerySupporter == NULL) {
@ -1219,7 +1228,6 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) {
(tFilePage **)pRuntimeEnv->pInterpoBuf, remain, &numOfInterpo);
doRevisedResultsByLimit(pQInfo);
moveDescOrderResultsToFront(pRuntimeEnv);
pQInfo->pointsInterpo += numOfInterpo;
pQInfo->pointsRead += pQuery->pointsRead;
@ -1275,16 +1283,17 @@ void vnodeSingleMeterQuery(SSchedMsg *pMsg) {
int64_t st = taosGetTimestampUs();
if (pQuery->nAggTimeInterval != 0) { // interval (down sampling operation)
// group by normal column, sliding window query, interval query are handled by interval query processor
if (pQuery->nAggTimeInterval != 0 || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation)
assert(pQuery->checkBufferInLoop == 0 && pQuery->pointsOffset == pQuery->pointsToRead);
vnodeSingleMeterIntervalProcessor(pQInfo);
vnodeSingleTableIntervalProcessor(pQInfo);
} else {
if (isFixedOutputQuery(pQuery)) {
assert(pQuery->checkBufferInLoop == 0);
vnodeSingleMeterFixedOutputProcessor(pQInfo);
vnodeSingleTableFixedOutputProcessor(pQInfo);
} else { // diff/add/multiply/subtract/division
assert(pQuery->checkBufferInLoop == 1);
vnodeSingleMeterMultiOutputProcessor(pQInfo);
vnodeSingleTableMultiOutputProcessor(pQInfo);
}
}
@ -1331,7 +1340,7 @@ void vnodeMultiMeterQuery(SSchedMsg *pMsg) {
assert((pQuery->checkBufferInLoop == 1 && pQuery->nAggTimeInterval == 0) || isPointInterpoQuery(pQuery) ||
isGroupbyNormalCol(pQuery->pGroupbyExpr));
vnodeMultiMeterMultiOutputProcessor(pQInfo);
vnodeSTableSeqProcessor(pQInfo);
}
/* record the total elapsed time */

View File

@ -25,6 +25,8 @@
#include "vnode.h"
#include "vnodeRead.h"
#include "vnodeUtil.h"
#include "hash.h"
#include "hashutil.h"
int (*pQueryFunc[])(SMeterObj *, SQuery *) = {vnodeQueryFromCache, vnodeQueryFromFile};
@ -265,6 +267,7 @@ static SQInfo *vnodeAllocateQInfoEx(SQueryMeterMsg *pQueryMsg, SSqlGroupbyExpr *
pQuery->pGroupbyExpr = pGroupbyExpr;
pQuery->nAggTimeInterval = pQueryMsg->nAggTimeInterval;
pQuery->slidingTime = pQueryMsg->slidingTime;
pQuery->interpoType = pQueryMsg->interpoType;
pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
@ -390,11 +393,6 @@ __clean_memory:
return NULL;
}
//static void vnodeFreeQInfoInQueueImpl(SSchedMsg *pMsg) {
// SQInfo *pQInfo = (SQInfo *)pMsg->ahandle;
// vnodeFreeQInfo(pQInfo, true);
//}
void vnodeFreeQInfoInQueue(void *param) {
SQInfo *pQInfo = (SQInfo *)param;
@ -404,15 +402,6 @@ void vnodeFreeQInfoInQueue(void *param) {
dTrace("QInfo:%p set kill flag to free QInfo");
vnodeDecRefCount(pQInfo);
// dTrace("QInfo:%p set kill flag and add to queue, stop query ASAP", pQInfo);
// SSchedMsg schedMsg = {0};
// schedMsg.fp = vnodeFreeQInfoInQueueImpl;
// schedMsg.msg = NULL;
// schedMsg.thandle = (void *)1;
// schedMsg.ahandle = param;
// taosScheduleTask(queryQhandle, &schedMsg);
}
void vnodeFreeQInfo(void *param, bool decQueryRef) {
@ -616,7 +605,7 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
bool isProjQuery = vnodeIsProjectionQuery(pSqlExprs, pQueryMsg->numOfOutputCols);
// todo pass the correct error code
if (isProjQuery) {
if (isProjQuery && pQueryMsg->tsLen == 0) {
pQInfo = vnodeAllocateQInfo(pQueryMsg, pMeterObj, pSqlExprs);
} else {
pQInfo = vnodeAllocateQInfoEx(pQueryMsg, pGroupbyExpr, pSqlExprs, pMetersObj[0]);
@ -641,7 +630,6 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
pQuery->lastKey = pQuery->skey;
pQInfo->fp = pQueryFunc[pQueryMsg->order];
pQInfo->num = pQueryMsg->num;
if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) {
dError("QInfo:%p vid:%d sid:%d meterId:%s, init dataReady sem failed, reason:%s", pQInfo, pMeterObj->vnode,
@ -652,7 +640,9 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
SSchedMsg schedMsg = {0};
if (!isProjQuery) {
if (isProjQuery && pQueryMsg->tsLen == 0) {
schedMsg.fp = vnodeQueryData;
} else {
if (vnodeParametersSafetyCheck(pQuery) == false) {
*code = TSDB_CODE_APP_ERROR;
goto _error;
@ -661,8 +651,9 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj));
pSupporter->numOfMeters = 1;
pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt);
taosAddIntHash(pSupporter->pMeterObj, pMetersObj[0]->sid, (char *)&pMetersObj[0]);
pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false);
taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[0]->sid, sizeof(pMeterObj[0].sid),
(char *)&pMetersObj[0], POINTER_BYTES);
pSupporter->pSidSet = NULL;
pSupporter->subgroupIdx = -1;
@ -688,9 +679,7 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
return pQInfo;
}
schedMsg.fp = vnodeSingleMeterQuery;
} else {
schedMsg.fp = vnodeQueryData;
schedMsg.fp = vnodeSingleTableQuery;
}
/*
@ -740,7 +729,6 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
pQuery->ekey = pQueryMsg->ekey;
pQInfo->fp = pQueryFunc[pQueryMsg->order];
pQInfo->num = pQueryMsg->num;
if (sem_init(&(pQInfo->dataReady), 0, 0) != 0) {
dError("QInfo:%p vid:%d sid:%d id:%s, init dataReady sem failed, reason:%s", pQInfo, pMetersObj[0]->vnode,
@ -754,9 +742,10 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE
SMeterQuerySupportObj *pSupporter = (SMeterQuerySupportObj *)calloc(1, sizeof(SMeterQuerySupportObj));
pSupporter->numOfMeters = pQueryMsg->numOfSids;
pSupporter->pMeterObj = taosInitIntHash(pSupporter->numOfMeters, POINTER_BYTES, taosHashInt);
pSupporter->pMetersHashTable = taosInitHashTable(pSupporter->numOfMeters, taosIntHash_32, false);
for (int32_t i = 0; i < pSupporter->numOfMeters; ++i) {
taosAddIntHash(pSupporter->pMeterObj, pMetersObj[i]->sid, (char *)&pMetersObj[i]);
taosAddToHashTable(pSupporter->pMetersHashTable, (const char*) &pMetersObj[i]->sid, sizeof(pMetersObj[i]->sid), (char *)&pMetersObj[i],
POINTER_BYTES);
}
int32_t sidElemLen = pQueryMsg->tagLength + sizeof(SMeterSidExtInfo);
@ -911,7 +900,7 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) {
if (pQInfo->pMeterQuerySupporter != NULL) {
if (pQInfo->pMeterQuerySupporter->pSidSet == NULL) {
schedMsg.fp = vnodeSingleMeterQuery;
schedMsg.fp = vnodeSingleTableQuery;
} else { // group by tag
schedMsg.fp = vnodeMultiMeterQuery;
}
@ -981,14 +970,14 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) {
pQueryMsg->ekey = htobe64(pQueryMsg->ekey);
#endif
pQueryMsg->num = htonl(pQueryMsg->num);
pQueryMsg->order = htons(pQueryMsg->order);
pQueryMsg->orderColId = htons(pQueryMsg->orderColId);
pQueryMsg->queryType = htons(pQueryMsg->queryType);
pQueryMsg->nAggTimeInterval = htobe64(pQueryMsg->nAggTimeInterval);
pQueryMsg->slidingTime = htobe64(pQueryMsg->slidingTime);
pQueryMsg->numOfTagsCols = htons(pQueryMsg->numOfTagsCols);
pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols);
pQueryMsg->numOfOutputCols = htons(pQueryMsg->numOfOutputCols);

View File

@ -311,7 +311,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) {
if (pVnode->cfg.maxSessions == 0) {
dError("qmsg:%p,vid:%d is not activated yet", pQueryMsg, pQueryMsg->vnode);
vnodeSendVpeerCfgMsg(pQueryMsg->vnode);
code = TSDB_CODE_NOT_ACTIVE_TABLE;
code = TSDB_CODE_NOT_ACTIVE_VNODE;
goto _query_over;
}
@ -355,7 +355,7 @@ int vnodeProcessQueryRequest(char *pMsg, int msgLen, SShellObj *pObj) {
assert(incNumber <= pQueryMsg->numOfSids);
pthread_mutex_unlock(&pVnode->vmutex);
if (code != TSDB_CODE_SUCCESS) {
if (code != TSDB_CODE_SUCCESS || pQueryMsg->numOfSids == 0) { // all the meters may have been dropped.
goto _query_over;
}
@ -511,7 +511,8 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) {
assert(code != TSDB_CODE_ACTION_IN_PROGRESS);
if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS) && pRetrieve->qhandle != NULL) {
if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS) &&
pRetrieve->qhandle != 0) {
dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code);
vnodeDecRefCount(pObj->qhandle);
pObj->qhandle = NULL;
@ -623,6 +624,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) {
SShellSubmitMsg *pSubmit = &shellSubmit;
SShellSubmitBlock *pBlocks = NULL;
pSubmit->import = htons(pSubmit->import);
pSubmit->vnode = htons(pSubmit->vnode);
pSubmit->numOfSid = htonl(pSubmit->numOfSid);

View File

@ -195,9 +195,9 @@ static int32_t vnodeBuildExprFromArithmeticStr(SSqlFunctionExpr* pExpr, SQueryMe
num = i + 1;
pBinaryExprInfo->pReqColumns = malloc(sizeof(SColIndexEx) * num);
for (int32_t i = 0; i < num; ++i) {
SColIndexEx* pColIndex = &pBinaryExprInfo->pReqColumns[i];
pColIndex->colId = ids[i];
for (int32_t k = 0; k < num; ++k) {
SColIndexEx* pColIndex = &pBinaryExprInfo->pReqColumns[k];
pColIndex->colId = ids[k];
}
pBinaryExprInfo->numOfCols = num;
@ -252,7 +252,7 @@ SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t*
if (pColumnIndexExInfo->colIdx >= pQueryMsg->numOfTagsCols) {
*code = TSDB_CODE_INVALID_QUERY_MSG;
tfree(pExprs);
break;
return NULL;
}
type = pTagSchema[pColumnIndexExInfo->colIdx].type;
@ -264,7 +264,7 @@ SSqlFunctionExpr* vnodeCreateSqlFunctionExpr(SQueryMeterMsg* pQueryMsg, int32_t*
if (*code != TSDB_CODE_SUCCESS) {
tfree(pExprs);
break;
return NULL;
}
type = TSDB_DATA_TYPE_DOUBLE;
@ -539,18 +539,25 @@ bool vnodeIsProjectionQuery(SSqlFunctionExpr* pExpr, int32_t numOfOutput) {
* 3. insert has nothing to do with the query processing.
*/
int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSids, SMeterObj** pMeterObjList,
int32_t* numOfInc) {
int32_t* numOfIncTables) {
SVnodeObj* pVnode = &vnodeList[pQueryMsg->vnode];
int32_t num = 0;
int32_t index = 0;
int32_t code = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) {
SMeterObj* pMeter = pVnode->meterList[pSids[i]->sid];
/*
 * If the table is missing or is being dropped, request its config from the management node and ignore it
 * during query processing. The error code TSDB_CODE_NOT_ACTIVE_TABLE is never returned to the client.
 * The missing table needs to be removed from the pSids list.
 */
if (pMeter == NULL || vnodeIsMeterState(pMeter, TSDB_METER_STATE_DROPPING)) {
code = TSDB_CODE_NOT_ACTIVE_TABLE;
dError("qmsg:%p, vid:%d sid:%d, not there or will be dropped", pQueryMsg, pQueryMsg->vnode, pSids[i]->sid);
dWarn("qmsg:%p, vid:%d sid:%d, not there or will be dropped, ignore this table in query", pQueryMsg,
pQueryMsg->vnode, pSids[i]->sid);
vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid);
continue;
@ -572,9 +579,11 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid
* vnodeIsSafeToDeleteMeter will wait for this function complete, and then it can
* check if the numOfQueries is 0 or not.
*/
pMeterObjList[(*numOfInc)++] = pMeter;
pMeterObjList[(*numOfIncTables)++] = pMeter;
atomic_fetch_add_32(&pMeter->numOfQueries, 1);
pSids[index++] = pSids[i];
// log the table when more than one query is executed on it
if (pMeter->numOfQueries > 1) {
dTrace("qmsg:%p, vid:%d sid:%d id:%s, inc query ref, numOfQueries:%d", pQueryMsg, pMeter->vnode, pMeter->sid,
@ -583,16 +592,19 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid
}
}
dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1", pQueryMsg,
pQueryMsg->numOfSids, *numOfInc, (*numOfInc) - num);
dTrace("qmsg:%p, query meters: %d, inc query ref %d, numOfQueries on %d meters are 1, queried meters:%d after "
"filter missing meters", pQueryMsg, pQueryMsg->numOfSids, *numOfIncTables, (*numOfIncTables) - num, index);
assert(pQueryMsg->numOfSids >= (*numOfIncTables) && pQueryMsg->numOfSids >= index);
pQueryMsg->numOfSids = index;
return code;
}
void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfInc) {
void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList, int32_t numOfIncTables) {
int32_t num = 0;
for (int32_t i = 0; i < numOfInc; ++i) {
for (int32_t i = 0; i < numOfIncTables; ++i) {
SMeterObj* pMeter = pMeterObjList[i];
if (pMeter != NULL) { // here, do not need to lock to perform operations
@ -606,7 +618,7 @@ void vnodeDecQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterObj** pMeterObjList,
}
}
dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfInc, numOfInc - num);
dTrace("qmsg:%p, dec query ref for %d meters, numOfQueries on %d meters are 0", pQueryMsg, numOfIncTables, numOfIncTables - num);
}
void vnodeUpdateQueryColumnIndex(SQuery* pQuery, SMeterObj* pMeterObj) {

View File

@ -24,7 +24,7 @@ int mgmtProcessAlterAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) {
}
int mgmtProcessCreateDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) {
return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT);
return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT);
}
int mgmtProcessCfgMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) {
@ -36,7 +36,7 @@ int mgmtProcessDropMnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) {
}
int mgmtProcessDropDnodeMsg(char *pMsg, int msgLen, SConnObj *pConn) {
return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_PNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT);
return taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_DROP_DNODE_RSP, TSDB_CODE_OPS_NOT_SUPPORT);
}
int mgmtProcessDropAcctMsg(char *pMsg, int msgLen, SConnObj *pConn) {

545
src/util/src/hash.c Normal file
View File

@ -0,0 +1,545 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "hash.h"
#include "tlog.h"
#include "ttime.h"
#include "tutil.h"
static FORCE_INLINE void __wr_lock(void *lock) {
#if defined LINUX
pthread_rwlock_wrlock(lock);
#else
pthread_mutex_lock(lock);
#endif
}
static FORCE_INLINE void __rd_lock(void *lock) {
#if defined LINUX
pthread_rwlock_rdlock(lock);
#else
pthread_mutex_lock(lock);
#endif
}
static FORCE_INLINE void __unlock(void *lock) {
#if defined LINUX
pthread_rwlock_unlock(lock);
#else
pthread_mutex_unlock(lock);
#endif
}
static FORCE_INLINE int32_t __lock_init(void *lock) {
#if defined LINUX
return pthread_rwlock_init(lock, NULL);
#else
return pthread_mutex_init(lock, NULL);
#endif
}
static FORCE_INLINE void __lock_destroy(void *lock) {
#if defined LINUX
pthread_rwlock_destroy(lock);
#else
pthread_mutex_destroy(lock);
#endif
}
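// round the requested capacity up to the next power of two, capped at HASH_MAX_CAPACITY (e.g. 100 -> 128)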
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
int32_t len = MIN(length, HASH_MAX_CAPACITY);
uint32_t i = 4;
while (i < len) i = (i << 1U);
return i;
}
/**
* hash key function
*
* @param key key string
* @param len length of key
* @return hash value
*/
static FORCE_INLINE uint32_t taosHashKey(const char *key, uint32_t len) { return MurmurHash3_32(key, len); }
/**
* inplace update node in hash table
* @param pObj hash table object
* @param pNode data node
*/
static void doUpdateHashTable(HashObj *pObj, SHashNode *pNode) {
if (pNode->prev1) {
pNode->prev1->next = pNode;
}
if (pNode->next) {
(pNode->next)->prev = pNode;
}
pTrace("key:%s %p update hash table", pNode->key, pNode);
}
/**
 * get SHashNode from the hash list; nodes in the trash are not included.
 * @param pObj hash table object
* @param key key for hash
* @param keyLen key length
* @return
*/
static SHashNode *doGetNodeFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen, uint32_t *hashVal) {
uint32_t hash = (*pObj->hashFp)(key, keyLen);
int32_t slot = HASH_INDEX(hash, pObj->capacity);
SHashEntry *pEntry = pObj->hashList[slot];
SHashNode *pNode = pEntry->next;
while (pNode) {
if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
break;
}
pNode = pNode->next;
}
if (pNode) {
assert(HASH_INDEX(pNode->hashVal, pObj->capacity) == slot);
}
// return the calculated hash value, to avoid calculating it again in other functions
if (hashVal != NULL) {
*hashVal = hash;
}
return pNode;
}
/**
* resize the hash list if the threshold is reached
*
* @param pObj
*/
static void taosHashTableResize(HashObj *pObj) {
if (pObj->size < pObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
return;
}
// double the original capacity
SHashNode *pNode = NULL;
SHashNode *pNext = NULL;
int32_t newSize = pObj->capacity << 1U;
if (newSize > HASH_MAX_CAPACITY) {
pTrace("current capacity:%d, maximum capacity:%d, no resize applied due to limitation is reached", pObj->capacity,
HASH_MAX_CAPACITY);
return;
}
int64_t st = taosGetTimestampUs();
SHashEntry **pNewEntry = realloc(pObj->hashList, sizeof(SHashEntry*) * newSize);
if (pNewEntry == NULL) {
pTrace("cache resize failed due to out of memory, capacity remain:%d", pObj->capacity);
return;
}
pObj->hashList = pNewEntry;
for(int32_t i = pObj->capacity; i < newSize; ++i) {
pObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
pObj->capacity = newSize;
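// rehash: walk every slot and relocate nodes whose hash index changes under the doubled capacity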
for (int32_t i = 0; i < pObj->capacity; ++i) {
SHashEntry *pEntry = pObj->hashList[i];
pNode = pEntry->next;
if (pNode != NULL) {
assert(pNode->prev1 == pEntry && pEntry->num > 0);
}
while (pNode) {
int32_t j = HASH_INDEX(pNode->hashVal, pObj->capacity);
if (j == i) { // this key resides in the same slot, no need to relocate it
pNode = pNode->next;
} else {
pNext = pNode->next;
// remove from current slot
assert(pNode->prev1 != NULL);
if (pNode->prev1 == pEntry) { // first node of the overflow linked list
pEntry->next = pNode->next;
} else {
pNode->prev->next = pNode->next;
}
pEntry->num--;
assert(pEntry->num >= 0);
if (pNode->next != NULL) {
(pNode->next)->prev = pNode->prev;
}
// added into new slot
pNode->next = NULL;
pNode->prev1 = NULL;
SHashEntry *pNewIndexEntry = pObj->hashList[j];
if (pNewIndexEntry->next != NULL) {
assert(pNewIndexEntry->next->prev1 == pNewIndexEntry);
pNewIndexEntry->next->prev = pNode;
}
pNode->next = pNewIndexEntry->next;
pNode->prev1 = pNewIndexEntry;
pNewIndexEntry->next = pNode;
pNewIndexEntry->num++;
// continue
pNode = pNext;
}
}
}
int64_t et = taosGetTimestampUs();
pTrace("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", pObj->capacity,
((double)pObj->size) / pObj->capacity, (et - st) / 1000.0);
}
/**
* @param capacity maximum slots available for hash elements
 * @param fn hash function
 * @param multithreadSafe whether the table needs to be protected by an internal lock for concurrent access
 * @return
*/
void *taosInitHashTable(uint32_t capacity, _hash_fn_t fn, bool multithreadSafe) {
if (capacity == 0 || fn == NULL) {
return NULL;
}
HashObj *pObj = (HashObj *)calloc(1, sizeof(HashObj));
if (pObj == NULL) {
pError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
// normalize the requested capacity: round it up to a power of two and cap it at the maximum
pObj->capacity = taosHashCapacity(capacity);
assert((pObj->capacity & (pObj->capacity - 1)) == 0);
pObj->hashFp = fn;
pObj->hashList = (SHashEntry **)calloc(pObj->capacity, sizeof(SHashEntry*));
if (pObj->hashList == NULL) {
free(pObj);
pError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
for(int32_t i = 0; i < pObj->capacity; ++i) {
pObj->hashList[i] = calloc(1, sizeof(SHashEntry));
}
if (multithreadSafe && (__lock_init(pObj) != 0)) {
free(pObj->hashList);
free(pObj);
pError("failed to init lock, reason:%s", strerror(errno));
return NULL;
}
return (void *)pObj;
}
/**
* @param key key of object for hash, usually a null-terminated string
* @param keyLen length of key
 * @param pData actual data; a contiguous memory block is required, and no pointers may be stored
 * in pData, since copying a pointer would later cause invalid memory access.
 * @param dataSize size of the data block
* @return SHashNode
*/
static SHashNode *doCreateHashNode(const char *key, uint32_t keyLen, const char *pData, size_t dataSize,
uint32_t hashVal) {
size_t totalSize = dataSize + sizeof(SHashNode) + keyLen;
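// single allocation laid out as: [SHashNode header | data (dataSize bytes) | key (keyLen bytes)]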
SHashNode *pNewNode = calloc(1, totalSize);
if (pNewNode == NULL) {
pError("failed to allocate memory, reason:%s", strerror(errno));
return NULL;
}
memcpy(pNewNode->data, pData, dataSize);
pNewNode->key = pNewNode->data + dataSize;
memcpy(pNewNode->key, key, keyLen);
pNewNode->keyLen = keyLen;
pNewNode->hashVal = hashVal;
return pNewNode;
}
static SHashNode *doUpdateHashNode(SHashNode *pNode, const char *key, uint32_t keyLen, const char *pData,
size_t dataSize) {
size_t size = dataSize + sizeof(SHashNode) + keyLen;
SHashNode *pNewNode = (SHashNode *)realloc(pNode, size);
if (pNewNode == NULL) {
return NULL;
}
memcpy(pNewNode->data, pData, dataSize);
pNewNode->key = pNewNode->data + dataSize;
assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
memcpy(pNewNode->key, key, keyLen);
return pNewNode;
}
/**
* insert the hash node at the front of the linked list
*
* @param pObj
* @param pNode
*/
static void doAddToHashTable(HashObj *pObj, SHashNode *pNode) {
assert(pNode != NULL);
int32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity);
SHashEntry *pEntry = pObj->hashList[index];
pNode->next = pEntry->next;
if (pEntry->next) {
pEntry->next->prev = pNode;
}
pEntry->next = pNode;
pNode->prev1 = pEntry;
pEntry->num++;
pObj->size++;
// char key[512] = {0};
// memcpy(key, pNode->key, MIN(512, pNode->keyLen));
// pTrace("key:%s %p add to hash table", key, pNode);
}
/**
 * add a data entry into the hash table; an existing entry with the same key is updated in place
 * @param pObj hash object
 * @param key key of the entry, keyLen its length; data points to the value, size is its length in bytes
*/
int32_t taosAddToHashTable(HashObj *pObj, const char *key, uint32_t keyLen, void *data, uint32_t size) {
if (pObj->multithreadSafe) {
__wr_lock(&pObj->lock);
}
uint32_t hashVal = 0;
SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal);
if (pNode == NULL) { // no data in hash table with the specified key, add it into hash table
taosHashTableResize(pObj);
SHashNode *pNewNode = doCreateHashNode(key, keyLen, data, size, hashVal);
if (pNewNode == NULL) {
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
return -1;
}
doAddToHashTable(pObj, pNewNode);
} else {
SHashNode *pNewNode = doUpdateHashNode(pNode, key, keyLen, data, size);
if (pNewNode == NULL) {
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
return -1;
}
doUpdateHashTable(pObj, pNewNode);
}
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
return 0;
}
char *taosGetDataFromHash(HashObj *pObj, const char *key, uint32_t keyLen) {
if (pObj->multithreadSafe) {
__rd_lock(&pObj->lock);
}
uint32_t hashVal = 0;
SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &hashVal);
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
if (pNode != NULL) {
assert(pNode->hashVal == hashVal);
return pNode->data;
} else {
return NULL;
}
}
/**
 * remove the node with the given key from the hash table
 * @param pObj hash object
 * @param key key of the entry to remove, keyLen its length
*/
void taosDeleteFromHashTable(HashObj *pObj, const char *key, uint32_t keyLen) {
if (pObj->multithreadSafe) {
__wr_lock(&pObj->lock);
}
uint32_t val = 0;
SHashNode *pNode = doGetNodeFromHashTable(pObj, key, keyLen, &val);
if (pNode == NULL) {
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
return;
}
SHashNode *pNext = pNode->next;
if (pNode->prev != NULL) {
int32_t slot = HASH_INDEX(val, pObj->capacity);
if (pObj->hashList[slot]->next == pNode) {
pObj->hashList[slot]->next = pNext;
} else {
pNode->prev->next = pNext;
}
}
if (pNext != NULL) {
pNext->prev = pNode->prev;
}
uint32_t index = HASH_INDEX(pNode->hashVal, pObj->capacity);
SHashEntry *pEntry = pObj->hashList[index];
pEntry->num--;
pObj->size--;
pNode->next = NULL;
pNode->prev = NULL;
pTrace("key:%s %p remove from hash table", pNode->key, pNode);
tfree(pNode);
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
}
}
void taosCleanUpHashTable(void *handle) {
HashObj *pObj = (HashObj *)handle;
if (pObj == NULL || pObj->capacity <= 0) return;
SHashNode *pNode, *pNext;
if (pObj->multithreadSafe) {
__wr_lock(&pObj->lock);
}
if (pObj->hashList) {
for (int32_t i = 0; i < pObj->capacity; ++i) {
SHashEntry *pEntry = pObj->hashList[i];
pNode = pEntry->next;
while (pNode) {
pNext = pNode->next;
free(pNode);
pNode = pNext;
}
tfree(pEntry);
}
free(pObj->hashList);
}
if (pObj->multithreadSafe) {
__unlock(&pObj->lock);
__lock_destroy(&pObj->lock);
}
memset(pObj, 0, sizeof(HashObj));
free(pObj);
}
// for profiling only
int32_t taosGetHashMaxOverflowLength(HashObj* pObj) {
if (pObj == NULL || pObj->size == 0) {
return 0;
}
int32_t num = 0;
for (int32_t i = 0; i < pObj->capacity; ++i) {  // scan every slot; iterating over pObj->size would miss slots
SHashEntry *pEntry = pObj->hashList[i];
if (num < pEntry->num) {
num = pEntry->num;
}
}
return num;
}
int32_t taosCheckHashTable(HashObj *pObj) {
for(int32_t i = 0; i < pObj->capacity; ++i) {
SHashEntry *pEntry = pObj->hashList[i];
SHashNode* pNode = pEntry->next;
if (pNode != NULL) {
assert(pEntry == pNode->prev1);
int32_t num = 1;
SHashNode* pNext = pNode->next;
while(pNext) {
assert(pNext->prev == pNode);
pNode = pNext;
pNext = pNext->next;
num++;
}
assert(num == pEntry->num);
}
}
return 0;
}
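For orientation, a minimal usage sketch of the hash table API added in this file; the key and value types below are illustrative, error handling is omitted, and the headers are assumed to declare the functions exactly as implemented above (this sketch is not part of the commit):

#include "hash.h"
#include "hashutil.h"

static void hashTableUsageSketch() {
  // 64 initial slots, 32-bit integer keys, no internal locking
  void *ht = taosInitHashTable(64, taosIntHash_32, false);

  int32_t key = 17;
  int64_t value = 100;
  taosAddToHashTable(ht, (const char *)&key, sizeof(key), &value, sizeof(value));

  // the returned pointer refers to the copy stored inside the hash node
  int64_t *pVal = (int64_t *)taosGetDataFromHash(ht, (const char *)&key, sizeof(key));
  if (pVal != NULL) {
    // use *pVal here
  }

  taosDeleteFromHashTable(ht, (const char *)&key, sizeof(key));
  taosCleanUpHashTable(ht);
}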

View File

@ -20,6 +20,7 @@
#include "ttime.h"
#include "ttimer.h"
#include "tutil.h"
#include "hashutil.h"
#define HASH_MAX_CAPACITY (1024*1024*16)
#define HASH_VALUE_IN_TRASH (-1)
@ -901,5 +902,46 @@ void taosCleanUpDataCache(void *handle) {
}
pObj->deleting = 1;
return;
}
void* taosGetDataFromExists(void* handle, void* data) {
SCacheObj *pObj = (SCacheObj *)handle;
if (pObj == NULL || data == NULL) return NULL;
size_t offset = offsetof(SDataNode, data);
SDataNode *ptNode = (SDataNode *)((char *)data - offset);
if (ptNode->signature != (uint64_t) ptNode) {
pError("key: %p the data from cache is invalid", ptNode);
return NULL;
}
int32_t ref = atomic_add_fetch_32(&ptNode->refCount, 1);
pTrace("%p add ref data in cache, refCnt:%d", data, ref)
// the data is referenced by the cache itself and by at least one external object, so the reference count must be at least 2.
assert(ref >= 2);
return data;
}
void* taosTransferDataInCache(void* handle, void** data) {
SCacheObj *pObj = (SCacheObj *)handle;
if (pObj == NULL || data == NULL) return NULL;
size_t offset = offsetof(SDataNode, data);
SDataNode *ptNode = (SDataNode *)((char *)(*data) - offset);
if (ptNode->signature != (uint64_t) ptNode) {
pError("key: %p the data from cache is invalid", ptNode);
return NULL;
}
assert(ptNode->refCount >= 1);
char* d = *data;
// clear its reference to old area
*data = NULL;
return d;
}
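A brief hedged sketch of how the two new cache helpers might be used; cacheHandle and pCachedMeta stand in for a cache created and populated through the existing cache API and are not defined in this commit:

static void cacheRefSketch(void *cacheHandle, void *pCachedMeta) {
  // take an additional reference on data that is already held by another owner
  void *extraRef = taosGetDataFromExists(cacheHandle, pCachedMeta);
  if (extraRef != NULL) {
    // ... use extraRef independently and release it through the normal cache release path
  }

  // hand over ownership of the cached pointer; the caller's reference is cleared to NULL
  void *owned = taosTransferDataInCache(cacheHandle, &pCachedMeta);
  (void)owned;
}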

View File

@ -1603,7 +1603,7 @@ void tColModelAppend(tColModel *dstModel, tFilePage *dstPage, void *srcData, int
tOrderDescriptor *tOrderDesCreate(int32_t *orderColIdx, int32_t numOfOrderCols, tColModel *pModel,
int32_t tsOrderType) {
tOrderDescriptor *desc = (tOrderDescriptor *)malloc(sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols);
tOrderDescriptor *desc = (tOrderDescriptor *)calloc(1, sizeof(tOrderDescriptor) + sizeof(int32_t) * numOfOrderCols);
if (desc == NULL) {
return NULL;
}

View File

@ -130,6 +130,10 @@ int tsEnableMonitorModule = 1;
int tsRestRowLimit = 10240;
int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
// the maximum number of results for a projection query on a super table that are returned from
// one virtual node, to be ordered according to timestamp
int tsMaxNumOfOrderedResults = 100000;
/*
 * denote whether the server needs to compress response messages to the client at the application layer, including the query rsp,
 * metricmeta rsp, and multi-meter query rsp message bodies. The client compresses the submit message sent to the server.
@ -140,18 +144,29 @@ int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
*/
int tsCompressMsgSize = -1;
char tsSocketType[4] = "udp"; // use UDP by default[option: udp, tcp]
int tsTimePrecision = TSDB_TIME_PRECISION_MILLI; // time precision, millisecond by default
int tsMinSlidingTime = 10; // 10 ms for sliding time, the value will changed in
// case of time precision changed
int tsMinIntervalTime = 10; // 10 ms for interval time range, changed accordingly
int tsMaxStreamComputDelay = 20000; // 20sec, the maximum value of stream
// computing delay, changed accordingly
int tsStreamCompStartDelay = 10000; // 10sec, the first stream computing delay
// time after system launched successfully,
// changed accordingly
int tsStreamCompRetryDelay = 10; // the stream computing delay time after
// executing failed, change accordingly
// use UDP by default[option: udp, tcp]
char tsSocketType[4] = "udp";
// time precision, millisecond by default
int tsTimePrecision = TSDB_TIME_PRECISION_MILLI;
// 10 ms for sliding time, the value will changed in case of time precision changed
int tsMinSlidingTime = 10;
// 10 ms for interval time range, changed accordingly
int tsMinIntervalTime = 10;
// 20sec, the maximum value of stream computing delay, changed accordingly
int tsMaxStreamComputDelay = 20000;
// 10sec, the first stream computing delay time after system launched successfully, changed accordingly
int tsStreamCompStartDelay = 10000;
// the stream computing delay time after executing failed, change accordingly
int tsStreamCompRetryDelay = 10;
// The delayed computing ratio: 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1;
int tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once
int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
@ -622,9 +637,12 @@ static void doInitGlobalConfig() {
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW,
1000, 1000000000, 0, TSDB_CFG_UTYPE_MS);
tsInitConfigOption(cfg++, "retryStreamCompDelay", &tsStreamCompRetryDelay, TSDB_CFG_VTYPE_INT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW,
10, 1000000000, 0, TSDB_CFG_UTYPE_MS);
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 10, 1000000000, 0, TSDB_CFG_UTYPE_MS);
tsInitConfigOption(cfg++, "streamCompDelayRatio", &tsStreamComputDelayRatio, TSDB_CFG_VTYPE_FLOAT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW, 0.1, 0.9, 0, TSDB_CFG_UTYPE_NONE);
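Roughly, the new streamCompDelayRatio option is meant to work together with tsMaxStreamComputDelay; the sketch below shows that assumed relationship only, since the actual computation lives in the stream module and is not part of this diff:

// hedged sketch: derive the launch delay of a continuous query from its computing window,
// clamped by tsMaxStreamComputDelay (assumed semantics, not the committed implementation)
static int64_t getStreamComputeDelay(int64_t intervalMs) {
  int64_t delay = (int64_t)(intervalMs * tsStreamComputDelayRatio);
  if (delay > tsMaxStreamComputDelay) {
    delay = tsMaxStreamComputDelay;
  }
  return delay;
}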
tsInitConfigOption(cfg++, "clog", &tsCommitLog, TSDB_CFG_VTYPE_SHORT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW,
0, 1, 0, TSDB_CFG_UTYPE_NONE);
@ -667,6 +685,10 @@ static void doInitGlobalConfig() {
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW,
TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_BYTE);
tsInitConfigOption(cfg++, "maxNumOfOrderedRes", &tsMaxNumOfOrderedResults, TSDB_CFG_VTYPE_INT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW,
TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_NONE);
// locale & charset
tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT,

View File

@ -8,6 +8,7 @@
*
*/
#include "tutil.h"
#include "hashutil.h"
#define ROTL32(x, r) ((x) << (r) | (x) >> (32 - (r)))
@ -67,7 +68,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out)
*(uint32_t *)out = h1;
}
uint32_t MurmurHash3_32(const void *key, int len) {
uint32_t MurmurHash3_32(const char *key, uint32_t len) {
const int32_t hashSeed = 0x12345678;
uint32_t val = 0;
@ -75,3 +76,31 @@ uint32_t MurmurHash3_32(const void *key, int len) {
return val;
}
uint32_t taosIntHash_32(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint32_t *)key; }
uint32_t taosIntHash_16(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint16_t *)key; }
uint32_t taosIntHash_8(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint8_t *)key; }
uint32_t taosIntHash_64(const char *key, uint32_t UNUSED_PARAM(len)) {
uint64_t val = *(uint64_t *)key;
uint64_t hash = val >> 16U;
hash += (val & 0xFFFFU);
return hash;
}
_hash_fn_t taosGetDefaultHashFunction(int32_t type) {
_hash_fn_t fn = NULL;
switch(type) {
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break;
case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break;
case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break;
case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break;
case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break;
default: fn = taosIntHash_32;break;
}
return fn;
}
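As a small illustration (not part of the commit), the type-dispatched hash functions above are meant to be paired with taosInitHashTable from hash.c; the helper name below is hypothetical and assumes the declarations in hash.h and hashutil.h:

static void *createTableKeyedByType(int32_t keyType) {
  // pick the hash function matching the key's data type, then build a table with it
  _hash_fn_t fn = taosGetDefaultHashFunction(keyType);  // e.g. TSDB_DATA_TYPE_BIGINT -> taosIntHash_64
  return taosInitHashTable(128, fn, false);
}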

View File

@ -37,7 +37,7 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t timeRange, char
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day need to be dynamically decided.
*
* TODO dynmaically decide the start time of a day
* TODO dynamically decide the start time of a day
*/
#if defined(WINDOWS) && _MSC_VER >= 1900
@ -94,7 +94,7 @@ void taosInterpoSetStartInfo(SInterpolationInfo* pInterpoInfo, int32_t numOfRawD
return;
}
pInterpoInfo->rowIdx = INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1;
pInterpoInfo->rowIdx = 0;//INTERPOL_IS_ASC_INTERPOL(pInterpoInfo) ? 0 : numOfRawDataInRows - 1;
pInterpoInfo->numOfRawDataInRows = numOfRawDataInRows;
}
@ -118,14 +118,14 @@ int32_t taosGetNumOfResWithoutLimit(SInterpolationInfo* pInterpoInfo, int64_t* p
if (numOfAvailRawData > 0) {
int32_t finalNumOfResult = 0;
if (pInterpoInfo->order == TSQL_SO_ASC) {
// if (pInterpoInfo->order == TSQL_SO_ASC) {
// get last timestamp, calculate the result size
int64_t lastKey = pPrimaryKeyArray[pInterpoInfo->numOfRawDataInRows - 1];
finalNumOfResult = (int32_t)((lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1;
} else { // todo error less than one!!!
TSKEY lastKey = pPrimaryKeyArray[0];
finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1;
}
finalNumOfResult = (int32_t)(labs(lastKey - pInterpoInfo->startTimestamp) / nInterval) + 1;
// } else { // todo error less than one!!!
// TSKEY lastKey = pPrimaryKeyArray[0];
// finalNumOfResult = (int32_t)((pInterpoInfo->startTimestamp - lastKey) / nInterval) + 1;
// }
assert(finalNumOfResult >= numOfAvailRawData);
return finalNumOfResult;
@ -198,11 +198,11 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
}
static char* getPos(char* data, int32_t bytes, int32_t order, int32_t capacity, int32_t index) {
if (order == TSQL_SO_ASC) {
// if (order == TSQL_SO_ASC) {
return data + index * bytes;
} else {
return data + (capacity - index - 1) * bytes;
}
// } else {
// return data + (capacity - index - 1) * bytes;
// }
}
static void setTagsValueInInterpolation(tFilePage** data, char** pTags, tColModel* pModel, int32_t order, int32_t start,
@ -397,7 +397,7 @@ int32_t taosDoInterpoResult(SInterpolationInfo* pInterpoInfo, int16_t interpoTyp
}
pInterpoInfo->startTimestamp += (nInterval * step);
pInterpoInfo->rowIdx += step;
pInterpoInfo->rowIdx += 1;
num += 1;
if ((pInterpoInfo->rowIdx >= pInterpoInfo->numOfRawDataInRows && INTERPOL_IS_ASC_INTERPOL(pInterpoInfo)) ||

View File

@ -14,11 +14,13 @@
*/
#include "os.h"
#include "hashutil.h"
#include "shash.h"
#include "tutil.h"
#include "tsqldef.h"
#include "tstoken.h"
#include "ttypes.h"
#include "hash.h"
// All the keywords of the SQL language are stored in a hash table
typedef struct SKeyword {
@ -225,11 +227,14 @@ static SKeyword keywordTable[] = {
{"STABLE", TK_STABLE},
{"FILE", TK_FILE},
{"VNODES", TK_VNODES},
{"UNION", TK_UNION},
{"RATE", TK_RATE},
{"IRATE", TK_IRATE},
{"SUM_RATE", TK_SUM_RATE},
{"AVG_RATE", TK_AVG_RATE},
{"AVG_IRATE", TK_AVG_IRATE},
};
/* This is the hash table */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static const char isIdChar[] = {
/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */
@ -243,27 +248,22 @@ static const char isIdChar[] = {
};
static void* KeywordHashTable = NULL;
int tSQLKeywordCode(const char* z, int n) {
int i;
static char needInit = 1;
if (needInit) {
// Initialize the keyword hash table
pthread_mutex_lock(&mutex);
// double check
if (needInit) {
int nk = tListLen(keywordTable);
KeywordHashTable = taosInitStrHash(nk, POINTER_BYTES, taosHashStringStep1);
for (i = 0; i < nk; i++) {
keywordTable[i].len = strlen(keywordTable[i].name);
void* ptr = &keywordTable[i];
taosAddStrHash(KeywordHashTable, (char*)keywordTable[i].name, (void*)&ptr);
}
needInit = 0;
}
pthread_mutex_unlock(&mutex);
static void doInitKeywordsTable() {
int numOfEntries = tListLen(keywordTable);
KeywordHashTable = taosInitHashTable(numOfEntries, MurmurHash3_32, false);
for (int32_t i = 0; i < numOfEntries; i++) {
keywordTable[i].len = strlen(keywordTable[i].name);
void* ptr = &keywordTable[i];
taosAddToHashTable(KeywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES);
}
}
static pthread_once_t keywordsHashTableInit = PTHREAD_ONCE_INIT;
int tSQLKeywordCode(const char* z, int n) {
pthread_once(&keywordsHashTableInit, doInitKeywordsTable);
char key[128] = {0};
for (int32_t j = 0; j < n; ++j) {
@ -274,7 +274,7 @@ int tSQLKeywordCode(const char* z, int n) {
}
}
SKeyword** pKey = (SKeyword**)taosGetStrHashData(KeywordHashTable, key);
SKeyword** pKey = (SKeyword**)taosGetDataFromHash(KeywordHashTable, key, n);
if (pKey != NULL) {
return (*pKey)->type;
} else {
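The refactor above replaces double-checked locking with pthread_once when building the keyword table. A minimal sketch of the same lazy-initialization pattern for any read-only singleton; the names here are illustrative and not part of the commit:

#include <pthread.h>
#include <stdlib.h>

static void *gSingleton = NULL;
static pthread_once_t gSingletonInit = PTHREAD_ONCE_INIT;

static void doInitSingleton(void) {
  // build the shared, read-only structure exactly once; later readers never take a lock
  gSingleton = malloc(1);  // placeholder for the real initialization
}

static void *getSingleton(void) {
  pthread_once(&gSingletonInit, doInitSingleton);
  return gSingleton;
}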

View File

@ -164,12 +164,12 @@ void tVariantDestroy(tVariant *pVar) {
if (pVar == NULL) return;
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
free(pVar->pz);
pVar->pz = NULL;
tfree(pVar->pz);
pVar->nLen = 0;
}
}
void tVariantAssign(tVariant *pDst, tVariant *pSrc) {
void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
*pDst = *pSrc;

View File

@ -568,13 +568,13 @@ char *taosIpStr(uint32_t ipInt) {
void taosCleanupTier() {}
#endif
FORCE_INLINE float taos_align_get_float(char* pBuf) {
FORCE_INLINE float taos_align_get_float(const char* pBuf) {
float fv = 0;
*(int32_t*)(&fv) = *(int32_t*)pBuf;
return fv;
}
FORCE_INLINE double taos_align_get_double(char* pBuf) {
FORCE_INLINE double taos_align_get_double(const char* pBuf) {
double dv = 0;
*(int64_t*)(&dv) = *(int64_t*)pBuf;
return dv;

View File

@ -1,7 +1,7 @@
char version[64] = "1.6.5.3";
char compatible_version[64] = "1.6.1.0";
char gitinfo[128] = "751fa0239baa49c3aaa9b49e15f7812b17519800";
char gitinfo[128] = "700305490a82228ec1b0244afb838bdbb9de9793";
char gitinfoOfInternal[128] = "";
char buildinfo[512] = "Built by guanshengliang at 2020-01-08 15:21";
char buildinfo[512] = "Built by at 2020-01-17 13:22";
void libtaos_edge_1_6_5_1_Darwin_x64() {};
void libtaos_edge_1_6_5_1_Linux_x64() {};