fix(rpc): avoid fd leak

yihaoDeng 2022-05-13 12:18:57 +08:00
commit d0fe68b0e4
101 changed files with 4984 additions and 2245 deletions

View File

@ -22,7 +22,7 @@
static int running = 1;
static void msg_process(TAOS_RES* msg) {
char buf[1024];
memset(buf, 0, 1024);
/*memset(buf, 0, 1024);*/
printf("topic: %s\n", tmq_get_topic_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
while (1) {
@ -107,7 +107,7 @@ int32_t create_topic() {
taos_free_result(pRes);
/*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column with table as select ts, c1, c2, c3 from st1");
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;
@ -166,6 +166,7 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
/*tmq_conf_set(conf, "td.connect.db", "abc1");*/
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_offset_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
assert(tmq);

View File

@ -48,6 +48,7 @@ enum {
typedef enum EStreamType {
STREAM_NORMAL = 1,
STREAM_INVERT,
STREAM_REPROCESS,
STREAM_INVALID,
} EStreamType;

View File

@ -384,7 +384,7 @@ static FORCE_INLINE int32_t comparTagId(const void *key1, const void *key2) {
}
}
static FORCE_INLINE void *tdGetKVRowValOfCol(SKVRow row, int16_t colId) {
static FORCE_INLINE void *tdGetKVRowValOfCol(const SKVRow row, int16_t colId) {
void *ret = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ);
if (ret == NULL) return NULL;
return kvRowColVal(row, (SColIdx *)ret);

View File

@ -43,7 +43,7 @@ extern int32_t tsMaxNumOfDistinctResults;
extern int32_t tsCompatibleModel;
extern bool tsEnableSlaveQuery;
extern bool tsPrintAuth;
extern int64_t tsTickPerDay[3];
extern int64_t tsTickPerMin[3];
// multi-process
extern bool tsMultiProcess;

View File

@ -252,6 +252,7 @@ STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter);
int32_t tPrintFixedSchemaSubmitReq(const SSubmitReq* pReq, STSchema* pSchema);
typedef struct {
int32_t code;
int8_t hashMeta;
int64_t uid;
char* tblFName;
@ -2380,6 +2381,7 @@ typedef struct {
typedef struct {
SMsgHead head;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t withTbName;
int32_t epoch;
uint64_t reqId;
int64_t consumerId;

View File

@ -217,6 +217,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_UNKNOWN, "vnode-sync-unknown", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_COMMON_RESPONSE, "vnode-sync-common-response", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_APPLY_MSG, "vnode-sync-apply-msg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_CONFIG_CHANGE, "vnode-sync-config-change", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SYNC_VNODE, "vnode-sync-vnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_VNODE, "vnode-alter-vnode", NULL, NULL)

View File

@ -50,6 +50,7 @@ typedef struct {
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))

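The new IS_STR_DATA_TYPE macro is a narrower check than IS_VAR_DATA_TYPE: both accept VARCHAR and NCHAR, but only IS_VAR_DATA_TYPE accepts JSON. A minimal sanity-check sketch (not part of the commit, assuming the header above and <assert.h> are in scope):

static void check_str_vs_var_macros(void) {
  /* JSON is variable-length on disk but not a string type, so the macros diverge only for it. */
  assert(IS_VAR_DATA_TYPE(TSDB_DATA_TYPE_JSON));
  assert(!IS_STR_DATA_TYPE(TSDB_DATA_TYPE_JSON));
  assert(IS_STR_DATA_TYPE(TSDB_DATA_TYPE_NCHAR) && IS_VAR_DATA_TYPE(TSDB_DATA_TYPE_NCHAR));
}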
View File

@ -126,7 +126,7 @@ enum {
enum {
MAIN_SCAN = 0x0u,
REVERSE_SCAN = 0x1u,
REVERSE_SCAN = 0x1u, // todo remove it
REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan
MERGE_STAGE = 0x20u,
};
@ -222,13 +222,6 @@ enum {
typedef struct tExprNode {
int32_t nodeType;
union {
struct {
int32_t optr; // binary operator
void *info; // support filter operation on this expression only available for leaf node
struct tExprNode *pLeft; // left child pointer
struct tExprNode *pRight; // right child pointer
} _node;
SSchema *pSchema;// column node
struct SVariant *pVal; // value node
@ -237,12 +230,6 @@ typedef struct tExprNode {
int32_t functionId;
int32_t num;
struct SFunctionNode *pFunctNode;
// Note that pChild does not hold the function's parameters; it holds the columns that are involved in the
// calculation instead.
// E.g., for Cov(col1, col2), the column information w.r.t. col1 and col2 is kept in pChild nodes.
// The concat function, concat(col1, col2), is a binary scalar
// operator and is kept in the attribute of _node.
struct tExprNode **pChild;
} _function;
struct {
@ -273,6 +260,7 @@ typedef struct SAggFunctionInfo {
struct SScalarParam {
SColumnInfoData *columnData;
SHashObj *pHashFilter;
void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
int32_t numOfRows;
};
@ -281,10 +269,6 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
bool qIsValidUdf(SArray* pUdfInfo, const char* name, int32_t len, int32_t* functionId);
tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprdup(tExprNode* pTree);
void resetResultRowEntryResult(SqlFunctionCtx* pCtx, int32_t num);
void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);

View File

@ -39,15 +39,6 @@ extern "C" {
//======================================================================================
//begin API to taosd and qworker
enum {
UDFC_CODE_STOPPING = -1,
UDFC_CODE_PIPE_READ_ERR = -2,
UDFC_CODE_CONNECT_PIPE_ERR = -3,
UDFC_CODE_LOAD_UDF_FAILURE = -4,
UDFC_CODE_INVALID_STATE = -5,
UDFC_CODE_NO_PIPE = -6,
};
typedef void *UdfcFuncHandle;
/**
@ -89,6 +80,7 @@ typedef struct SUdfColumnData {
typedef struct SUdfColumn {
SUdfColumnMeta colMeta;
bool hasNull;
SUdfColumnData colData;
} SUdfColumn;
@ -232,6 +224,7 @@ static FORCE_INLINE void udfColDataSetNull(SUdfColumn* pColumn, int32_t row) {
} else {
udfColDataSetNull_f(pColumn, row);
}
pColumn->hasNull = true;
}
static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentRow, const char* pData, bool isNull) {

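udfColDataSetNull now also flips the column-level hasNull flag, so the SUdfColumn metadata stays in sync with the nulls the helper writes and readers that branch on hasNull see them.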
View File

@ -193,7 +193,6 @@ typedef struct SScanPhysiNode {
} SScanPhysiNode;
typedef SScanPhysiNode STagScanPhysiNode;
typedef SScanPhysiNode SStreamScanPhysiNode;
typedef struct SSystemTableScanPhysiNode {
SScanPhysiNode scan;
@ -217,6 +216,7 @@ typedef struct STableScanPhysiNode {
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
typedef STableScanPhysiNode SStreamScanPhysiNode;
typedef struct SProjectPhysiNode {
SPhysiNode node;

View File

@ -232,9 +232,9 @@ typedef struct SSelectStmt {
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
bool isEmptyResult;
bool isTimeOrderQuery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool isTimeOrderQuery;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@ -324,6 +324,7 @@ typedef struct SQuery {
SArray* pTableList;
bool showRewrite;
int32_t placeholderNum;
SArray* pPlaceholderValues;
} SQuery;
void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext);

View File

@ -47,7 +47,7 @@ typedef struct SParseContext {
bool isSuperUser;
} SParseContext;
int32_t qParseQuerySql(SParseContext* pCxt, SQuery** pQuery);
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
bool isInsertSql(const char* pStr, size_t length);
void qDestroyQuery(SQuery* pQueryNode);
@ -62,6 +62,8 @@ int32_t qRebuildStmtDataBlock(void** pDst, void* pSrc, uint64_t uid, int32_t vgI
void qDestroyStmtDataBlock(void* pBlock);
STableMeta* qGetTableMetaInDataBlock(void* pDataBlock);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx, uint64_t queryId);
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t colIdx,
int32_t rowNum);
@ -75,7 +77,7 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char*
void* smlInitHandle(SQuery* pQuery);
void smlDestroyHandle(void* pHandle);
int32_t smlBindData(void* handle, SArray* tags, SArray* colsFormat, SArray* colsSchema, SArray* cols, bool format,
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format,
STableMeta* pTableMeta, char* tableName, char* msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);

View File

@ -634,6 +634,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TAGS_NUM TAOS_DEF_ERROR_CODE(0, 0x2643)
#define TSDB_CODE_PAR_PERMISSION_DENIED TAOS_DEF_ERROR_CODE(0, 0x2644)
#define TSDB_CODE_PAR_INVALID_STREAM_QUERY TAOS_DEF_ERROR_CODE(0, 0x2645)
#define TSDB_CODE_PAR_INVALID_INTERNAL_PK TAOS_DEF_ERROR_CODE(0, 0x2646)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
@ -645,7 +646,16 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801)
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2604)
#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
#define TSDB_CODE_UDF_PIPE_READ_ERR TAOS_DEF_ERROR_CODE(0, 0x2902)
#define TSDB_CODE_UDF_PIPE_CONNECT_ERR TAOS_DEF_ERROR_CODE(0, 0x2903)
#define TSDB_CODE_UDF_PIPE_NO_PIPE TAOS_DEF_ERROR_CODE(0, 0x2904)
#define TSDB_CODE_UDF_LOAD_UDF_FAILURE TAOS_DEF_ERROR_CODE(0, 0x2905)
#define TSDB_CODE_UDF_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x2906)
#define TSDB_CODE_UDF_INVALID_INPUT TAOS_DEF_ERROR_CODE(0, 0x2907)
#define TSDB_CODE_SML_INVALID_PROTOCOL_TYPE TAOS_DEF_ERROR_CODE(0, 0x3000)
#define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)

View File

@ -59,6 +59,21 @@ static FORCE_INLINE void *taosDecodeFixedI8(const void *buf, int8_t *value) {
static FORCE_INLINE void *taosSkipFixedLen(const void *buf, size_t len) { return POINTER_SHIFT(buf, len); }
// --- Bool
static FORCE_INLINE int32_t taosEncodeFixedBool(void **buf, bool value) {
if (buf != NULL) {
((int8_t *)(*buf))[0] = value ? 1 : 0;
*buf = POINTER_SHIFT(*buf, sizeof(int8_t));
}
return (int32_t)sizeof(int8_t);
}
static FORCE_INLINE void *taosDecodeFixedBool(const void *buf, bool *value) {
*value = ((int8_t *)buf)[0] == 0 ? false : true;
return POINTER_SHIFT(buf, sizeof(int8_t));
}
// ---- Fixed U16
static FORCE_INLINE int32_t taosEncodeFixedU16(void **buf, uint16_t value) {
if (buf != NULL) {

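A round-trip sketch for the new bool codec (not part of the commit; local names are illustrative, and it assumes the header above plus <stdint.h>, <stdbool.h>, <stdlib.h>): the encoder returns sizeof(int8_t) and skips the write when handed a NULL cursor, so a caller can size the buffer first, then encode and decode.

static bool boolCodecRoundTrip(void) {
  bool in = true, out = false;
  int32_t len = taosEncodeFixedBool(NULL, in);  /* sizing pass: no write, returns 1 */
  void *base = malloc(len);
  if (base == NULL) return false;
  void *cur = base;
  taosEncodeFixedBool(&cur, in);   /* writes one byte and advances cur */
  taosDecodeFixedBool(base, &out); /* reads the byte back */
  free(base);
  return out == in;
}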
View File

@ -71,6 +71,7 @@ typedef struct SStmtBindInfo {
typedef struct SStmtExecInfo {
int32_t affectedRows;
bool emptyRes;
SRequestObj* pRequest;
SHashObj* pVgHash;
SHashObj* pBlockHash;

View File

@ -180,7 +180,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
return code;
}
code = qParseQuerySql(&cxt, pQuery);
code = qParseSql(&cxt, pQuery);
if (TSDB_CODE_SUCCESS == code) {
if ((*pQuery)->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, (*pQuery)->pResSchema, (*pQuery)->numOfResCols);

View File

@ -303,6 +303,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
break;
}
}
str[len] = 0;
return len;
}

File diff suppressed because it is too large

View File

@ -279,6 +279,7 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool freeRequest) {
}
pStmt->exec.autoCreateTbl = false;
pStmt->exec.emptyRes = false;
if (keepTable) {
return TSDB_CODE_SUCCESS;
@ -628,8 +629,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STMT_ERR_RET(stmtRestoreQueryFields(pStmt));
}
bool emptyResult = false;
STMT_RET(qStmtBindParam(pStmt->sql.pQueryPlan, bind, colIdx, pStmt->exec.pRequest->requestId, &emptyResult));
STMT_RET(qStmtBindParam(pStmt->sql.pQueryPlan, bind, colIdx, pStmt->exec.pRequest->requestId, &pStmt->exec.emptyRes));
}
STableDataBlocks **pDataBlock = (STableDataBlocks**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
@ -736,7 +736,11 @@ int stmtExec(TAOS_STMT *stmt) {
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
if (STMT_TYPE_QUERY == pStmt->sql.type) {
if (pStmt->exec.emptyRes) {
pStmt->exec.pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
} else {
scheduleQuery(pStmt->exec.pRequest, pStmt->sql.pQueryPlan, pStmt->sql.nodeList, NULL);
}
} else {
STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->exec.pVgHash, pStmt->exec.pBlockHash));
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, TSDB_CODE_SUCCESS, true, (autoCreateTbl ? (void**)&pRsp : NULL));

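As read from this hunk, the new emptyRes path lets a prepared query short-circuit: when qStmtBindParam reports that the bound parameters cannot produce any rows, stmtExec tags the request as TSDB_SQL_RETRIEVE_EMPTY_RESULT instead of calling scheduleQuery, so no plan is dispatched for an empty result.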
View File

@ -61,12 +61,13 @@ struct tmq_conf_t {
char groupId[TSDB_CGROUP_LEN];
int8_t autoCommit;
int8_t resetOffset;
int8_t withTbName;
uint16_t port;
int32_t autoCommitInterval;
char* ip;
char* user;
char* pass;
char* db;
/*char* db;*/
tmq_commit_cb* commitCb;
void* commitCbUserParam;
};
@ -75,6 +76,7 @@ struct tmq_t {
// conf
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
int8_t withTbName;
int8_t autoCommit;
int32_t autoCommitInterval;
int32_t resetOffsetCfg;
@ -187,6 +189,7 @@ typedef struct {
tmq_conf_t* tmq_conf_new() {
tmq_conf_t* conf = taosMemoryCalloc(1, sizeof(tmq_conf_t));
conf->withTbName = -1;
conf->autoCommit = true;
conf->autoCommitInterval = 5000;
conf->resetOffset = TMQ_CONF__RESET_OFFSET__EARLIEAST;
@ -240,6 +243,18 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
}
}
if (strcmp(key, "msg.with.table.name") == 0) {
if (strcmp(value, "true") == 0) {
conf->withTbName = 1;
} else if (strcmp(value, "false") == 0) {
conf->withTbName = 0;
} else if (strcmp(value, "none") == 0) {
conf->withTbName = -1;
} else {
return TMQ_CONF_INVALID;
}
}
if (strcmp(key, "td.connect.ip") == 0) {
conf->ip = strdup(value);
return TMQ_CONF_OK;
@ -257,7 +272,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
if (strcmp(key, "td.connect.db") == 0) {
conf->db = strdup(value);
/*conf->db = strdup(value);*/
return TMQ_CONF_OK;
}
@ -485,6 +500,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// set conf
strcpy(pTmq->clientId, conf->clientId);
strcpy(pTmq->groupId, conf->groupId);
pTmq->withTbName = conf->withTbName;
pTmq->autoCommit = conf->autoCommit;
pTmq->autoCommitInterval = conf->autoCommitInterval;
pTmq->commitCb = conf->commitCb;
@ -1104,6 +1120,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic*
pReq->subKey[tlen] = TMQ_SEPARATOR;
strcpy(pReq->subKey + tlen + 1, pTopic->topicName);
pReq->withTbName = tmq->withTbName;
pReq->waitTime = waitTime;
pReq->consumerId = tmq->consumerId;
pReq->epoch = tmq->epoch;

View File

@ -33,7 +33,7 @@ int main(int argc, char **argv) {
return RUN_ALL_TESTS();
}
TEST(testCase, smlParseString_Test) {
TEST(testCase, smlParseInfluxString_Test) {
char msg[256] = {0};
SSmlMsgBuf msgBuf;
msgBuf.buf = msg;
@ -42,7 +42,7 @@ TEST(testCase, smlParseString_Test) {
// case 1
char *sql = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ,32,c=3";
int ret = smlParseString(sql, &elements, &msgBuf);
int ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.measure, sql);
ASSERT_EQ(elements.measureLen, strlen("st"));
@ -60,13 +60,13 @@ TEST(testCase, smlParseString_Test) {
// case 2 false
sql = "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_NE(ret, 0);
// case 3 false
sql = "st, t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit hello,c1=2,c2=false,c4=4f64 1626006833639000000";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 2);
ASSERT_EQ(elements.colsLen, strlen("t1=3,t2=4,t3=t3"));
@ -74,7 +74,7 @@ TEST(testCase, smlParseString_Test) {
// case 4 tag is null
sql = "st, c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.measure, sql);
ASSERT_EQ(elements.measureLen, strlen("st"));
@ -92,7 +92,7 @@ TEST(testCase, smlParseString_Test) {
// case 5 tag is null
sql = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 1626006833639000000 ";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
sql++;
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.measure, sql);
@ -111,13 +111,13 @@ TEST(testCase, smlParseString_Test) {
// case 6
sql = " st c1=3i64,c3=\"passit hello,c1=2\",c2=false,c4=4f64 ";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_EQ(ret, 0);
// case 7
sql = " st , ";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
sql++;
ASSERT_EQ(ret, 0);
ASSERT_EQ(elements.cols, sql + elements.measureTagsLen + 3);
@ -126,7 +126,7 @@ TEST(testCase, smlParseString_Test) {
// case 8 false
sql = ", st , ";
memset(&elements, 0, sizeof(SSmlLineInfo));
ret = smlParseString(sql, &elements, &msgBuf);
ret = smlParseInfluxString(sql, &elements, &msgBuf);
ASSERT_NE(ret, 0);
}
@ -140,15 +140,13 @@ TEST(testCase, smlParseCols_Error_Test) {
"c=f64", // double
"c=8f64f",
"c=8ef64",
"c=1.7976931348623158e+390f64",
"c=f32", // float
"c=8f32f",
"c=8wef32",
"c=-3.402823466e+39f32",
"c=", // float
"c=", // double
"c=8f",
"c=8we",
"c=3.402823466e+39",
"c=i8", // tiny int
"c=-8i8f",
"c=8wei8",
@ -218,7 +216,7 @@ TEST(testCase, smlParseCols_tag_Test) {
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
const char *data =
"cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
"cbin=\"passit helloc=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
int32_t ret = smlParseCols(data, len, cols, true, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
@ -230,7 +228,7 @@ TEST(testCase, smlParseCols_tag_Test) {
ASSERT_EQ(strncasecmp(kv->key, "cbin", 4), 0);
ASSERT_EQ(kv->keyLen, 4);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_NCHAR);
ASSERT_EQ(kv->valueLen, 18);
ASSERT_EQ(kv->valueLen, 17);
ASSERT_EQ(strncasecmp(kv->value, "\"passit", 7), 0);
taosMemoryFree(kv);
@ -280,7 +278,7 @@ TEST(testCase, smlParseCols_Test) {
SHashObj *dumplicateKey = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf32_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
const char *data = "cbin=\"passit hello,c=2\",cnch=L\"iisdfsf\",cbool=false,cf64=4.31f64,cf64_=8.32,cf32=8.23f32,ci8=-34i8,cu8=89u8,ci16=233i16,cu16=898u16,ci32=98289i32,cu32=12323u32,ci64=-89238i64,ci=989i,cu64=8989323u64,cbooltrue=true,cboolt=t,cboolf=f,cnch_=l\"iuwq\"";
int32_t len = strlen(data);
int32_t ret = smlParseCols(data, len, cols, false, dumplicateKey, &msgBuf);
ASSERT_EQ(ret, TSDB_CODE_SUCCESS);
@ -321,17 +319,17 @@ TEST(testCase, smlParseCols_Test) {
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_DOUBLE);
ASSERT_EQ(kv->length, 8);
//ASSERT_EQ(kv->d, 4.31);
printf("4.31 = kv->f:%f\n", kv->d);
printf("4.31 = kv->d:%f\n", kv->d);
taosMemoryFree(kv);
// float
kv = (SSmlKv *)taosArrayGetP(cols, 4);
ASSERT_EQ(strncasecmp(kv->key, "cf32_", 5), 0);
ASSERT_EQ(strncasecmp(kv->key, "cf64_", 5), 0);
ASSERT_EQ(kv->keyLen, 5);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_FLOAT);
ASSERT_EQ(kv->length, 4);
ASSERT_EQ(kv->type, TSDB_DATA_TYPE_DOUBLE);
ASSERT_EQ(kv->length, 8);
//ASSERT_EQ(kv->f, 8.32);
printf("8.32 = kv->f:%f\n", kv->f);
printf("8.32 = kv->d:%f\n", kv->d);
taosMemoryFree(kv);
// float
@ -467,7 +465,7 @@ TEST(testCase, smlParseCols_Test) {
taosHashCleanup(dumplicateKey);
}
TEST(testCase, smlParseLine_Test) {
TEST(testCase, smlProcess_influx_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
@ -483,7 +481,7 @@ TEST(testCase, smlParseLine_Test) {
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
const char *sql[9] = {
const char *sql[11] = {
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0 1451606400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607400000000000",
"readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608400000000000",
@ -492,14 +490,24 @@ TEST(testCase, smlParseLine_Test) {
"readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606400000000000",
"readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606400000000000",
"readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609400000000000",
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629400000000000"
"readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629400000000000",
"stable,t1=t1,t2=t2,t3=t3 c1=1,c2=2,c3=3,c4=4 1451629500000000000",
"stable,t2=t2,t1=t1,t3=t3 c1=1,c3=3,c4=4 1451629600000000000"
};
smlInsertLines(info, (char**)sql, 9);
// for (int i = 0; i < 3; i++) {
// smlParseLine(info, sql[i]);
// }
smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
TAOS_RES *res = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
ASSERT_NE(res, nullptr);
int fieldNum = taos_field_count(res);
ASSERT_EQ(fieldNum, 11);
int rowNum = taos_affected_rows(res);
ASSERT_EQ(rowNum, 2);
for (int i = 0; i < rowNum; ++i) {
TAOS_ROW rows = taos_fetch_row(res);
}
}
// different types
TEST(testCase, smlParseLine_error_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
@ -520,24 +528,247 @@ TEST(testCase, smlParseLine_error_Test) {
"measure,t1=3 c1=8",
"measure,t2=3 c1=8u8"
};
int ret = smlInsertLines(info, (char **)sql, 2);
int ret = smlProcess(info, (char **)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_NE(ret, 0);
}
// TEST(testCase, smlParseTS_Test) {
// char msg[256] = {0};
// SSmlMsgBuf msgBuf;
// msgBuf.buf = msg;
// msgBuf.len = 256;
// SSmlLineInfo elements = {0};
//
// SSmlHandle* info = smlBuildSmlInfo(taos, request, protocol, precision, dataFormat);
// if(!info){
// return (TAOS_RES*)request;
// }
// ret = smlParseTS(info, elements.timestamp, elements.timestampLen, cols);
// if(ret != TSDB_CODE_SUCCESS){
// uError("SML:0x%"PRIx64" smlParseTS failed", info->id);
// return ret;
// }
TEST(testCase, smlGetTimestampLen_Test) {
uint8_t len = smlGetTimestampLen(0);
ASSERT_EQ(len, 1);
len = smlGetTimestampLen(1);
ASSERT_EQ(len, 1);
len = smlGetTimestampLen(10);
ASSERT_EQ(len, 2);
len = smlGetTimestampLen(390);
ASSERT_EQ(len, 3);
len = smlGetTimestampLen(-1);
ASSERT_EQ(len, 1);
len = smlGetTimestampLen(-10);
ASSERT_EQ(len, 2);
len = smlGetTimestampLen(-390);
ASSERT_EQ(len, 3);
}
TEST(testCase, smlProcess_telnet_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_db");
taos_free_result(pRes);
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
const char *sql[4] = {
"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
"sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
"sys.if.bytes.out 1479496102 1.3E3 network=tcp",
"sys.procs.running 1479496100 42 host=web01"
};
int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
ASSERT_EQ(ret, 0);
TAOS_RES *res = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
ASSERT_NE(res, nullptr);
int fieldNum = taos_field_count(res);
ASSERT_EQ(fieldNum, 2);
int rowNum = taos_affected_rows(res);
ASSERT_EQ(rowNum, 1);
for (int i = 0; i < rowNum; ++i) {
TAOS_ROW rows = taos_fetch_row(res);
}
res = taos_query(taos, "select * from t_6931529054e5637ca92c78a1ad441961");
ASSERT_NE(res, nullptr);
fieldNum = taos_field_count(res);
ASSERT_EQ(fieldNum, 2);
rowNum = taos_affected_rows(res);
ASSERT_EQ(rowNum, 2);
for (int i = 0; i < rowNum; ++i) {
TAOS_ROW rows = taos_fetch_row(res);
}
}
TEST(testCase, smlProcess_json_Test) {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(taos, nullptr);
TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_db");
taos_free_result(pRes);
SRequestObj *request = (SRequestObj *)createRequest((STscObj*)taos, NULL, NULL, TSDB_SQL_INSERT);
ASSERT_NE(request, nullptr);
SSmlHandle *info = smlBuildSmlInfo(taos, request, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS, true);
ASSERT_NE(info, nullptr);
const char *sql = "[\n"
" {\n"
" \"metric\": \"sys.cpu.nice\",\n"
" \"timestamp\": 1346846400,\n"
" \"value\": 18,\n"
" \"tags\": {\n"
" \"host\": \"web01\",\n"
" \"dc\": \"lga\"\n"
" }\n"
" },\n"
" {\n"
" \"metric\": \"sys.cpu.nice\",\n"
" \"timestamp\": 1346846400,\n"
" \"value\": 9,\n"
" \"tags\": {\n"
" \"host\": \"web02\",\n"
" \"dc\": \"lga\"\n"
" }\n"
" }\n"
"]";
int ret = smlProcess(info, (char**)(&sql), -1);
ASSERT_EQ(ret, 0);
TAOS_RES *res = taos_query(taos, "select * from t_cb27a7198d637b4f1c6464bd73f756a7");
ASSERT_NE(res, nullptr);
int fieldNum = taos_field_count(res);
ASSERT_EQ(fieldNum, 2);
// int rowNum = taos_affected_rows(res);
// ASSERT_EQ(rowNum, 1);
// for (int i = 0; i < rowNum; ++i) {
// TAOS_ROW rows = taos_fetch_row(res);
// }
sql = "{\n"
" \"metric\": \"meter_current\",\n"
" \"timestamp\": {\n"
" \"value\" : 1346846400,\n"
" \"type\" : \"s\"\n"
" },\n"
" \"value\": {\n"
" \"value\" : 10.3,\n"
" \"type\" : \"i64\"\n"
" },\n"
" \"tags\": {\n"
" \"groupid\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"bigint\"\n"
" },\n"
" \"location\": { \n"
" \"value\" : \"北京\",\n"
" \"type\" : \"binary\"\n"
" },\n"
" \"id\": \"d1001\"\n"
" }\n"
"}";
ret = smlProcess(info, (char**)(&sql), -1);
ASSERT_EQ(ret, 0);
sql = "{\n"
" \"metric\": \"meter_current\",\n"
" \"timestamp\": {\n"
" \"value\" : 1346846400,\n"
" \"type\" : \"s\"\n"
" },\n"
" \"value\": {\n"
" \"value\" : 10.3,\n"
" \"type\" : \"i64\"\n"
" },\n"
" \"tags\": {\n"
" \"t1\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"bigint\"\n"
" },\n"
" \"t2\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"int\"\n"
" },\n"
" \"t3\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"i16\"\n"
" },\n"
" \"t4\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"i8\"\n"
" },\n"
" \"t5\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"f32\"\n"
" },\n"
" \"t6\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"double\"\n"
" },\n"
" \"t7\": { \n"
" \"value\" : \"8323\",\n"
" \"type\" : \"binary\"\n"
" },\n"
" \"t8\": { \n"
" \"value\" : \"北京\",\n"
" \"type\" : \"binary\"\n"
" },\n"
" \"t9\": { \n"
" \"value\" : true,\n"
" \"type\" : \"bool\"\n"
" },\n"
" \"id\": \"d1001\"\n"
" }\n"
"}";
ret = smlProcess(info, (char**)(&sql), -1);
ASSERT_EQ(ret, 0);
sql = "{\n"
" \"metric\": \"meter_current\",\n"
" \"timestamp\": {\n"
" \"value\" : 1346846400000,\n"
" \"type\" : \"ms\"\n"
" },\n"
" \"value\": \"ni\",\n"
" \"tags\": {\n"
" \"t1\": { \n"
" \"value\" : 20,\n"
" \"type\" : \"i64\"\n"
" },\n"
" \"t2\": { \n"
" \"value\" : 25,\n"
" \"type\" : \"i32\"\n"
" },\n"
" \"t3\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"smallint\"\n"
" },\n"
" \"t4\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"tinyint\"\n"
" },\n"
" \"t5\": { \n"
" \"value\" : 2,\n"
" \"type\" : \"float\"\n"
" },\n"
" \"t6\": { \n"
" \"value\" : 0.2,\n"
" \"type\" : \"f64\"\n"
" },\n"
" \"t7\": \"nsj\",\n"
" \"t8\": { \n"
" \"value\" : \"北京\",\n"
" \"type\" : \"binary\"\n"
" },\n"
" \"t9\": false,\n"
" \"id\": \"d1001\"\n"
" }\n"
"}";
ret = smlProcess(info, (char**)(&sql), -1);
ASSERT_EQ(ret, 0);
}

View File

@ -1311,6 +1311,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
tlen += taosEncodeFixedI16(buf, pColData->info.colId);
tlen += taosEncodeFixedI16(buf, pColData->info.type);
tlen += taosEncodeFixedI32(buf, pColData->info.bytes);
tlen += taosEncodeFixedBool(buf, pColData->hasNull);
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
tlen += taosEncodeBinary(buf, pColData->varmeta.offset, sizeof(int32_t) * rows);
@ -1340,6 +1341,7 @@ void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock) {
buf = taosDecodeFixedI16(buf, &data.info.colId);
buf = taosDecodeFixedI16(buf, &data.info.type);
buf = taosDecodeFixedI32(buf, &data.info.bytes);
buf = taosDecodeFixedBool(buf, &data.hasNull);
if (IS_VAR_DATA_TYPE(data.info.type)) {
buf = taosDecodeBinary(buf, (void**)&data.varmeta.offset, pBlock->info.rows * sizeof(int32_t));
@ -1445,6 +1447,10 @@ void blockDebugShowData(const SArray* dataBlocks) {
for (int32_t k = 0; k < colNum; k++) {
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
if (pColInfoData->hasNull) {
printf(" %15s |", "NULL");
continue;
}
switch (pColInfoData->info.type) {
case TSDB_DATA_TYPE_TIMESTAMP:
formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI);
@ -1462,6 +1468,9 @@ void blockDebugShowData(const SArray* dataBlocks) {
case TSDB_DATA_TYPE_UBIGINT:
printf(" %15lu |", *(uint64_t*)var);
break;
case TSDB_DATA_TYPE_DOUBLE:
printf(" %15f |", *(double*)var);
break;
}
}
printf("\n");

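Note that hasNull is a column-level flag, so this debug printer shows NULL for every row of any column that contains at least one null, not just for the null rows themselves.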
View File

@ -153,11 +153,11 @@ bool tsStreamSchedV = true;
/*
* minimum scale for whole system, millisecond by default
* for TSDB_TIME_PRECISION_MILLI: 86400000L
* TSDB_TIME_PRECISION_MICRO: 86400000000L
* TSDB_TIME_PRECISION_NANO: 86400000000000L
* for TSDB_TIME_PRECISION_MILLI: 60000L
* TSDB_TIME_PRECISION_MICRO: 60000000L
* TSDB_TIME_PRECISION_NANO: 60000000000L
*/
int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L};
int64_t tsTickPerMin[] = {60000L, 60000000L, 60000000000L};
// lossy compress 6
char tsLossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
@ -444,7 +444,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "startUdfd", tsStartUdfd, 0) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
return 0;
}
@ -585,7 +585,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
tsStartUdfd = cfgGetItem(pCfg, "startUdfd")->bval;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
if (tsQueryBufferSize >= 0) {
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;

View File

@ -4032,6 +4032,7 @@ int32_t tDecodeSVSubmitReq(SDecoder *pCoder, SVSubmitReq *pReq) {
static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBlock) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pBlock->code) < 0) return -1;
if (tEncodeI8(pEncoder, pBlock->hashMeta) < 0) return -1;
if (pBlock->hashMeta) {
if (tEncodeI64(pEncoder, pBlock->uid) < 0) return -1;
@ -4047,6 +4048,7 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
if (tDecodeI8(pDecoder, &pBlock->hashMeta) < 0) return -1;
if (pBlock->hashMeta) {
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
@ -4108,4 +4110,3 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
taosMemoryFree(pRsp);
}

View File

@ -144,11 +144,12 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->szCache = pCreate->pages;
pCfg->szBuf = pCreate->buffer * 1024 * 1024;
pCfg->isWeak = true;
pCfg->tsdbCfg.compression = pCreate->compression;
pCfg->tsdbCfg.precision = pCreate->precision;
pCfg->tsdbCfg.days = 10;
pCfg->tsdbCfg.keep0 = 3650;
pCfg->tsdbCfg.keep1 = 3650;
pCfg->tsdbCfg.keep2 = 3650;
pCfg->tsdbCfg.days = pCreate->daysPerFile;
pCfg->tsdbCfg.keep0 = pCreate->daysToKeep0;
pCfg->tsdbCfg.keep1 = pCreate->daysToKeep1;
pCfg->tsdbCfg.keep2 = pCreate->daysToKeep2;
pCfg->tsdbCfg.minRows = pCreate->minRows;
pCfg->tsdbCfg.maxRows = pCreate->maxRows;
for (size_t i = 0; i < taosArrayGetSize(pCreate->pRetensions); ++i) {

View File

@ -318,6 +318,7 @@ static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew) {
pOld->updateTime = pNew->updateTime;
pOld->version = pNew->version;
pOld->nextColId = pNew->nextColId;
pOld->ttl = pNew->ttl;
pOld->numOfColumns = pNew->numOfColumns;
pOld->numOfTags = pNew->numOfTags;
memcpy(pOld->pColumns, pNew->pColumns, pOld->numOfColumns * sizeof(SSchema));
@ -832,7 +833,7 @@ static int32_t mndProcessVCreateStbRsp(SNodeMsg *pRsp) {
}
static int32_t mndCheckAlterStbReq(SMAlterStbReq *pAlter) {
if (pAlter->commentLen != 0) return 0;
if (pAlter->commentLen != 0 || pAlter->ttl != 0) return 0;
if (pAlter->numOfFields < 1 || pAlter->numOfFields != (int32_t)taosArrayGetSize(pAlter->pFields)) {
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
@ -883,7 +884,8 @@ static int32_t mndAllocStbSchemas(const SStbObj *pOld, SStbObj *pNew) {
return 0;
}
static int32_t mndUpdateStbComment(const SStbObj *pOld, SStbObj *pNew, char *pComment, int32_t commentLen) {
static int32_t mndUpdateStbCommentAndTTL(const SStbObj *pOld, SStbObj *pNew, char *pComment, int32_t commentLen,
int32_t ttl) {
if (commentLen > 0) {
pNew->commentLen = commentLen;
pNew->comment = taosMemoryCalloc(1, commentLen);
@ -893,6 +895,9 @@ static int32_t mndUpdateStbComment(const SStbObj *pOld, SStbObj *pNew, char *pCo
}
memcpy(pNew->comment, pComment, commentLen);
}
if (ttl >= 0) {
pNew->ttl = ttl;
}
if (mndAllocStbSchemas(pOld, pNew) != 0) {
return -1;
@ -1232,7 +1237,7 @@ static int32_t mndAlterStb(SMnode *pMnode, SNodeMsg *pReq, const SMAlterStbReq *
code = mndAlterStbColumnBytes(pOld, &stbObj, pField0);
break;
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
code = mndUpdateStbComment(pOld, &stbObj, pAlter->comment, pAlter->commentLen);
code = mndUpdateStbCommentAndTTL(pOld, &stbObj, pAlter->comment, pAlter->commentLen, pAlter->ttl);
break;
default:
terrno = TSDB_CODE_OPS_NOT_SUPPORT;
@ -1723,7 +1728,7 @@ static int32_t mndRetrieveStb(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pBlo
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStb->updateTime, false); // number of tables
char *p = taosMemoryMalloc(pStb->commentLen + VARSTR_HEADER_SIZE); // check malloc failures
char *p = taosMemoryCalloc(1, pStb->commentLen + 1 + VARSTR_HEADER_SIZE); // check malloc failures
if (p != NULL) {
if (pStb->commentLen != 0) {
STR_TO_VARSTR(p, pStb->comment);

View File

@ -485,8 +485,10 @@ static int32_t mndProcessDropTopicReq(SNodeMsg *pReq) {
return -1;
}
}
// TODO: check ref
int32_t code = mndDropTopic(pMnode, pReq, pTopic);
// TODO: iterate and drop related subscriptions and offsets
mndReleaseTopic(pMnode, pTopic);
if (code != 0) {

View File

@ -76,6 +76,7 @@ void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid);
#if 1 // refact APIs below (TODO)
typedef SVCreateTbReq STbCfg;
@ -114,6 +115,7 @@ void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond)
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
int32_t tsdbGetOneTableGroup(void *pMeta, uint64_t uid, TSKEY startKey, STableGroupInfo *pGroupInfo);
int32_t tsdbGetTableGroupFromIdList(SVnode *pVnode, SArray *pTableIdList, STableGroupInfo *pGroupInfo);
void tsdbCleanupReadHandle(tsdbReaderT queryHandle);
// tq

View File

@ -99,7 +99,6 @@ int32_t tsdbInitSma(STsdb *pTsdb);
int32_t tsdbDropTSma(STsdb *pTsdb, char *pMsg);
int32_t tsdbDropTSmaData(STsdb *pTsdb, int64_t indexUid);
int32_t tsdbInsertRSmaData(STsdb *pTsdb, char *msg);
void tsdbCleanupReadHandle(tsdbReaderT queryHandle);
typedef enum {
TSDB_FILE_HEAD = 0, // .head
TSDB_FILE_DATA, // .data
@ -518,9 +517,9 @@ void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn);
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
return (int)((key + 1) / tsTickPerDay[precision] / days - 1);
return (int)((key + 1) / tsTickPerMin[precision] / days - 1);
} else {
return (int)((key / tsTickPerDay[precision] / days));
return (int)((key / tsTickPerMin[precision] / days));
}
}
@ -770,8 +769,8 @@ static FORCE_INLINE int tsdbCopyDFileSet(SDFileSet *pSrc, SDFileSet *pDest) {
}
static FORCE_INLINE void tsdbGetFidKeyRange(int days, int8_t precision, int fid, TSKEY *minKey, TSKEY *maxKey) {
*minKey = fid * days * tsTickPerDay[precision];
*maxKey = *minKey + days * tsTickPerDay[precision] - 1;
*minKey = fid * days * tsTickPerMin[precision];
*maxKey = *minKey + days * tsTickPerMin[precision] - 1;
}
static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet *pSet) {

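A quick worked example of the per-minute tick table (values chosen for illustration only, assuming the days setting is now expressed in minutes to match the switch from tsTickPerDay to tsTickPerMin): with millisecond precision, tsTickPerMin[precision] is 60000, so for days = 14400 (about ten days) and fid = 3, tsdbGetFidKeyRange yields minKey = 3 * 14400 * 60000 = 2,592,000,000 ms and maxKey = minKey + 14400 * 60000 - 1 = 3,455,999,999 ms, and TSDB_KEY_FID maps any key in that range back to fid 3.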
View File

@ -158,7 +158,9 @@ SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, boo
skmDbKey.sver = sver;
pKey = &skmDbKey;
kLen = sizeof(skmDbKey);
metaRLock(pMeta);
ret = tdbDbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen);
metaULock(pMeta);
if (ret < 0) {
return NULL;
}
@ -181,6 +183,7 @@ SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, boo
}
struct SMCtbCursor {
SMeta *pMeta;
TDBC *pCur;
tb_uid_t suid;
void *pKey;
@ -200,9 +203,13 @@ SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid) {
return NULL;
}
pCtbCur->pMeta = pMeta;
pCtbCur->suid = uid;
metaRLock(pMeta);
ret = tdbDbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL);
if (ret < 0) {
metaULock(pMeta);
taosMemoryFree(pCtbCur);
return NULL;
}
@ -220,6 +227,7 @@ SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid) {
void metaCloseCtbCurosr(SMCtbCursor *pCtbCur) {
if (pCtbCur) {
if (pCtbCur->pMeta) metaULock(pCtbCur->pMeta);
if (pCtbCur->pCur) {
tdbDbcClose(pCtbCur->pCur);
@ -458,3 +466,8 @@ void *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid, bool isDecode) {
}
#endif
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t cid) {
ASSERT(pEntry->type == TSDB_CHILD_TABLE);
return tdGetKVRowValOfCol((const SKVRow)pEntry->ctbEntry.pTags, cid);
}

View File

@ -427,13 +427,18 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
SMqDataBlkRsp rsp = {0};
rsp.reqOffset = pReq->currentOffset;
rsp.withSchema = pExec->withSchema;
rsp.withTbName = pExec->withTbName;
rsp.blockData = taosArrayInit(0, sizeof(void*));
rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
rsp.blockSchema = taosArrayInit(0, sizeof(void*));
rsp.blockTbName = taosArrayInit(0, sizeof(void*));
int8_t withTbName = pExec->withTbName;
if (pReq->withTbName != -1) {
withTbName = pReq->withTbName;
}
rsp.withTbName = withTbName;
while (1) {
consumerEpoch = atomic_load_32(&pExec->epoch);
if (consumerEpoch > reqEpoch) {
@ -452,9 +457,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
}
if (pHeadWithCkSum->head.msgType != TDMT_VND_SUBMIT) {
walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum);
ASSERT(walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum) == 0);
} else {
walFetchBody(pExec->pWalReader, &pHeadWithCkSum);
ASSERT(walFetchBody(pExec->pWalReader, &pHeadWithCkSum) == 0);
}
SWalReadHead* pHead = &pHeadWithCkSum->head;
@ -538,7 +543,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
taosArrayPush(rsp.blockSchema, &pSW);
}
if (pExec->withTbName) {
if (withTbName) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
@ -578,7 +583,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
ASSERT(actualLen <= dataStrLen);
taosArrayPush(rsp.blockDataLen, &actualLen);
taosArrayPush(rsp.blockData, &buf);
if (pExec->withTbName) {
if (withTbName) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
if (metaGetTableEntryByUid(&mr, block.info.uid) < 0) {
@ -945,6 +950,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int32_t parallel) {
.reader = pStreamReader,
.meta = pTq->pVnode->pMeta,
.pMsgCb = &pTq->pVnode->msgCb,
.vnode = pTq->pVnode,
};
pTask->exec.runners[i].inputHandle = pStreamReader;
pTask->exec.runners[i].executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);

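The poll handler now lets each request override the subscription's table-name setting unless the consumer left it at "none" (-1). A sketch of that resolution rule (hypothetical helper, not in the commit):

/* reqValue is SMqPollReq.withTbName; execDefault comes from the topic handle. */
static int8_t resolveWithTbName(int8_t execDefault, int8_t reqValue) {
  return (reqValue != -1) ? reqValue : execDefault;
}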
View File

@ -70,6 +70,7 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid);
static void tsdbResetCommitFile(SCommitH *pCommith);
static int tsdbSetAndOpenCommitFile(SCommitH *pCommith, SDFileSet *pSet, int fid);
static int tsdbCommitToTable(SCommitH *pCommith, int tid);
static bool tsdbCommitIsSameFile(SCommitH *pCommith, int bidx);
static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx);
static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable);
static int tsdbComparKeyBlock(const void *arg1, const void *arg2);
@ -215,9 +216,9 @@ void tsdbGetRtnSnap(STsdb *pRepo, SRtn *pRtn) {
TSKEY minKey, midKey, maxKey, now;
now = taosGetTimestamp(pCfg->precision);
minKey = now - pCfg->keep2 * tsTickPerDay[pCfg->precision];
midKey = now - pCfg->keep1 * tsTickPerDay[pCfg->precision];
maxKey = now - pCfg->keep0 * tsTickPerDay[pCfg->precision];
minKey = now - pCfg->keep2 * tsTickPerMin[pCfg->precision];
midKey = now - pCfg->keep1 * tsTickPerMin[pCfg->precision];
maxKey = now - pCfg->keep0 * tsTickPerMin[pCfg->precision];
pRtn->minKey = minKey;
pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->days, pCfg->precision));
@ -892,6 +893,8 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid) {
static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
SReadH *pReadh = &pCommith->readh;
STsdb *pTsdb = TSDB_READ_REPO(pReadh);
STSchema *pTSchema = NULL;
int nBlocks = pIdx->numOfBlocks;
int bidx = 0;
@ -903,24 +906,42 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
return -1;
}
STable table = {.tid = pIdx->uid, .uid = pIdx->uid, .pSchema = NULL};
pCommith->pTable = &table;
while (bidx < nBlocks) {
if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) {
// Set commit table
pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 0); // TODO: schema version
if (!pTSchema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
table.pSchema = pTSchema;
if (tsdbSetCommitTable(pCommith, &table) < 0) {
taosMemoryFreeClear(pTSchema);
return -1;
}
}
if (tsdbMoveBlock(pCommith, bidx) < 0) {
tsdbError("vgId:%d failed to move block into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
taosMemoryFreeClear(pTSchema);
return -1;
}
++bidx;
}
STable table = {.tid = pIdx->uid, .uid = pIdx->uid, .pSchema = NULL};
TSDB_COMMIT_TABLE(pCommith) = &table;
if (tsdbWriteBlockInfo(pCommith) < 0) {
tsdbError("vgId:%d failed to write SBlockInfo part into file %s since %s", TSDB_COMMIT_REPO_ID(pCommith),
TSDB_FILE_FULL_NAME(TSDB_COMMIT_HEAD_FILE(pCommith)), tstrerror(terrno));
taosMemoryFreeClear(pTSchema);
return -1;
}
taosMemoryFreeClear(pTSchema);
return 0;
}
@ -1323,6 +1344,14 @@ static int tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx) {
return 0;
}
static bool tsdbCommitIsSameFile(SCommitH *pCommith, int bidx) {
SBlock *pBlock = pCommith->readh.pBlkInfo->blocks + bidx;
if (pBlock->last) {
return pCommith->isLFileSame;
}
return pCommith->isDFileSame;
}
static int tsdbMoveBlock(SCommitH *pCommith, int bidx) {
SBlock *pBlock = pCommith->readh.pBlkInfo->blocks + bidx;
SDFile *pDFile;

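The reworked tsdbMoveBlkIdx appears to fetch the table schema lazily: metaGetTbTSchema is called only the first time a block must be rewritten into a different file (tsdbCommitIsSameFile returns false), while blocks that stay in the same last/data file are moved without loading a schema, and the schema is freed on every exit path.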
View File

@ -62,6 +62,16 @@ int tsdbMemTableCreate(STsdb *pTsdb, STsdbMemTable **ppMemTable) {
void tsdbMemTableDestroy(STsdb *pTsdb, STsdbMemTable *pMemTable) {
if (pMemTable) {
taosHashCleanup(pMemTable->pHashIdx);
SSkipListIterator *pIter = tSkipListCreateIter(pMemTable->pSlIdx);
SSkipListNode *pNode = NULL;
STbData *pTbData = NULL;
for (;;) {
if (!tSkipListIterNext(pIter)) break;
pNode = tSkipListIterGet(pIter);
pTbData = (STbData *)pNode->pData;
tsdbFreeTbData(pTbData);
}
tSkipListDestroyIter(pIter);
tSkipListDestroy(pMemTable->pSlIdx);
taosMemoryFree(pMemTable);
}
@ -300,6 +310,17 @@ int tsdbInsertTableData(STsdb *pTsdb, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlo
TSKEY keyMax;
SSubmitBlk *pBlkCopy;
// check if table exists
SMetaReader mr = {0};
SMetaEntry me = {0};
metaReaderInit(&mr, pTsdb->pVnode->pMeta, 0);
if (metaGetTableEntryByUid(&mr, pMsgIter->uid) < 0) {
metaReaderClear(&mr);
terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
return -1;
}
metaReaderClear(&mr);
// create container if needed
tptr = taosHashGet(pMemTable->pHashIdx, &(pMsgIter->uid), sizeof(pMsgIter->uid));
if (tptr == NULL) {

View File

@ -323,7 +323,7 @@ static int64_t getEarliestValidTimestamp(STsdb* pTsdb) {
STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdb);
int64_t now = taosGetTimestamp(pCfg->precision);
return now - (tsTickPerDay[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick
return now - (tsTickPerMin[pCfg->precision] * pCfg->keep2) + 1; // needs to add one tick
}
static void setQueryTimewindow(STsdbReadHandle* pTsdbReadHandle, SQueryTableDataCond* pCond) {
@ -1047,10 +1047,10 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio
}
if (key < 0) {
key -= (daysPerFile * tsTickPerDay[precision]);
key -= (daysPerFile * tsTickPerMin[precision]);
}
int64_t fid = (int64_t)(key / (daysPerFile * tsTickPerDay[precision])); // set the starting fileId
int64_t fid = (int64_t)(key / (daysPerFile * tsTickPerMin[precision])); // set the starting fileId
if (fid < 0L && llabs(fid) > INT32_MAX) { // data value overflow for INT32
fid = INT32_MIN;
}

View File

@ -1017,7 +1017,7 @@ static int32_t tsdbGetTSmaDays(STsdb *pTsdb, int64_t interval, int32_t storageLe
int32_t daysPerFile = pCfg->days;
if (storageLevel == SMA_STORAGE_LEVEL_TSDB) {
int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerDay[pCfg->precision]);
int32_t days = SMA_STORAGE_TSDB_TIMES * (interval / tsTickPerMin[pCfg->precision]);
daysPerFile = days > SMA_STORAGE_TSDB_DAYS ? days : SMA_STORAGE_TSDB_DAYS;
}

View File

@ -63,8 +63,8 @@ static int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq *pMsg) {
STSRow *row = NULL;
STsdbKeepCfg *pCfg = REPO_KEEP_CFG(pTsdb);
TSKEY now = taosGetTimestamp(pCfg->precision);
TSKEY minKey = now - tsTickPerDay[pCfg->precision] * pCfg->keep2;
TSKEY maxKey = now + tsTickPerDay[pCfg->precision] * pCfg->days;
TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2;
TSKEY maxKey = now + tsTickPerMin[pCfg->precision] * pCfg->days;
terrno = TSDB_CODE_SUCCESS;
// pMsg->length = htonl(pMsg->length);

View File

@ -562,6 +562,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
int32_t nRows;
int32_t tsize, ret;
SEncoder encoder = {0};
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@ -576,6 +577,11 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
}
submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp));
if (!submitRsp.pArray) {
pRsp->code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int i = 0;;) {
tGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
@ -595,7 +601,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
pRsp->code = terrno;
submitBlkRsp.code = terrno;
tDecoderClear(&decoder);
goto _exit;
}
@ -617,8 +623,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
}
if (tsdbInsertTableData(pVnode->pTsdb, &msgIter, pBlock, &submitBlkRsp) < 0) {
pRsp->code = terrno;
goto _exit;
submitBlkRsp.code = terrno;
}
submitRsp.numOfRows += submitBlkRsp.numOfRows;
@ -640,7 +645,12 @@ _exit:
taosArrayDestroy(submitRsp.pArray);
// TODO: the partial success scenario and the error case
// TODO: refactor
if ((terrno == TSDB_CODE_SUCCESS || terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) &&
(pRsp->code == TSDB_CODE_SUCCESS)) {
tsdbTriggerRSma(pVnode->pTsdb, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK);
}
return 0;
}

View File

@ -72,6 +72,7 @@ int32_t vnodeSendMsg(void *rpcHandle, const SEpSet *pEpSet, SRpcMsg *pMsg) {
int32_t ret = 0;
SMsgCb *pMsgCb = rpcHandle;
if (pMsgCb->queueFps[SYNC_QUEUE] != NULL) {
pMsg->noResp = 1;
tmsgSendReq(rpcHandle, pEpSet, pMsg);
} else {
vError("vnodeSendMsg queue is NULL, SYNC_QUEUE:%d", SYNC_QUEUE);

View File

@ -333,6 +333,8 @@ typedef struct SScanInfo {
typedef struct STableScanInfo {
void* dataReader;
SReadHandle readHandle;
SFileBlockLoadRecorder readRecorder;
int64_t numOfRows;
int64_t elapsedTime;
@ -348,6 +350,11 @@ typedef struct STableScanInfo {
SArray* pColMatchInfo;
int32_t numOfOutput;
SExprInfo* pPseudoExpr;
int32_t numOfPseudoExpr;
SqlFunctionCtx* pPseudoCtx;
// int32_t* rowCellInfoOffset;
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
@ -364,9 +371,18 @@ typedef struct STagScanInfo {
STableGroupInfo *pTableGroups;
} STagScanInfo;
typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
STREAM_SCAN_FROM_DATAREADER,
} EStreamScanMode;
typedef struct SStreamBlockScanInfo {
SArray* pBlockLists; // multiple SSDatablock.
SSDataBlock* pRes; // result SSDataBlock
SSDataBlock* pUpdateRes; // update SSDataBlock
int32_t updateResIndex;
int32_t blockType; // current block type
int32_t validBlockIndex; // Is current data has returned?
SColumnInfo* pCols; // the output column info
@ -378,6 +394,10 @@ typedef struct SStreamBlockScanInfo {
SArray* tsArray;
SUpdateInfo* pUpdateInfo;
int32_t primaryTsIndex; // primary time stamp slot id
void* pDataReader;
EStreamScanMode scanMode;
SOperatorInfo* pOperatorDumy;
SInterval interval; // if the upstream is an interval operator, the interval info is also kept here.
} SStreamBlockScanInfo;
typedef struct SSysTableScanInfo {
@ -616,10 +636,10 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
int32_t initAggInfo(SOptrBasicInfo* pBasicInfo, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, size_t keyBufSize, const char* pkey);
void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows);
void doBuildResultDatablock(SOptrBasicInfo *pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf);
void doBuildResultDatablock(SExecTaskInfo *taskInfo, SOptrBasicInfo *pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf);
void finalizeMultiTupleQueryResult(int32_t numOfOutput, SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset);
void doApplyFunctions(SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);
int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes,
int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo, SAggSupporter* pAggSup);
@ -628,7 +648,7 @@ int32_t setSDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadI
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList);
void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win);
int32_t getTableScanOrder(SOperatorInfo* pOperator);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
@ -638,18 +658,24 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWin
void cleanupAggSup(SAggSupporter* pAggSup);
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo);
SSDataBlock* loadNextDataBlock(void* param);
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
int32_t type);
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo,
char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup);
SOperatorInfo* createTableScanOperatorInfo(void* pDataReader, SQueryTableDataCond* pCond, int32_t numOfOutput, int32_t dataLoadFlag, const uint8_t* scanInfo,
SArray* pColMatchInfo, SSDataBlock* pResBlock, SNode* pCondition, SInterval* pInterval, double sampleRatio, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo, const STableGroupInfo* pTableGroupInfo);
@ -678,8 +704,9 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo,
const STableGroupInfo* pTableGroupInfo);
SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock* pResBlock, SArray* pColList,
SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pConditions);
SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SSDataBlock* pResBlock,
SArray* pColList, SArray* pTableIdList, SExecTaskInfo* pTaskInfo,
SNode* pConditions, SOperatorInfo* pOperatorDumy, SInterval* pInterval);
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
@ -704,7 +731,7 @@ SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntim
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
int32_t numOfOutput, SArray* pPseudoList);
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, bool createDummyCol);
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol);
void copyTsColoum(SSDataBlock* pRes, SqlFunctionCtx* pCtx, int32_t numOfOutput);
@ -733,6 +760,15 @@ bool aggDecodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasi
int32_t length);
void aggEncodeResultRow(SOperatorInfo* pOperator, SAggSupporter* pSup, SOptrBasicInfo* pInfo, char** result,
int32_t* length);
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts,
SInterval* pInterval, int32_t precision, STimeWindow* win);
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos,
TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item,
int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
void doClearWindow(SIntervalAggOperatorInfo* pInfo, char* pData, int16_t bytes,
uint64_t groupId, int32_t numOfOutput);
#ifdef __cplusplus
}

View File

@ -155,7 +155,7 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
static int32_t doCopyToSDataBlock(SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf,
static int32_t doCopyToSDataBlock(SExecTaskInfo *taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo, int32_t orderType, int32_t* rowCellOffset,
SqlFunctionCtx* pCtx);
@ -344,6 +344,28 @@ SResultRow* getNewResultRow_rv(SDiskbasedBuf* pResultBuf, int64_t tableGroupId,
return pResultRow;
}
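// Reset the intermediate results of the window identified by (pData, groupId) so that
// the window can be re-aggregated when its data is reprocessed.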
void doClearWindow(SIntervalAggOperatorInfo* pInfo, char* pData, int16_t bytes,
uint64_t groupId, int32_t numOfOutput) {
SAggSupporter* pSup = &pInfo->aggSup;
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
(SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
GET_RES_WINDOW_KEY_LEN(bytes));
SResultRow* pResult = getResultRowByPos(pSup->pResultBuf, p1);
SqlFunctionCtx* pCtx = pInfo->binfo.pCtx;
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].resultInfo = getResultCell(pResult, i, pInfo->binfo.rowCellInfoOffset);
struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) {
continue;
}
pResInfo->initialized = false;
if (pCtx[i].functionId != -1) {
pCtx[i].fpSet.init(&pCtx[i], pResInfo);
}
}
}
/**
* the struct of key in hash table
* +----------+---------------+
@ -579,7 +601,7 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow
colDataAppendInt64(pColData, 4, &pQueryWindow->ekey);
}
void doApplyFunctions(SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order) {
for (int32_t k = 0; k < numOfOutput; ++k) {
pCtx[k].startTs = pWin->skey;
@ -618,9 +640,14 @@ void doApplyFunctions(SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData*
pEntryInfo->numOfRes = 1;
continue;
}
int32_t code = TSDB_CODE_SUCCESS;
if (functionNeedToExecute(&pCtx[k]) && pCtx[k].fpSet.process != NULL) {
pCtx[k].fpSet.process(&pCtx[k]);
code = pCtx[k].fpSet.process(&pCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
longjmp(taskInfo->env, code);
}
}
// restore it
@ -649,7 +676,7 @@ static FORCE_INLINE TSKEY reviseWindowEkey(STaskAttr* pQueryAttr, STimeWindow* p
}
static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order,
bool createDummyCol);
int32_t scanFlag, bool createDummyCol);
static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock,
int32_t order) {
@ -660,12 +687,12 @@ static void doSetInputDataBlockInfo(SOperatorInfo* pOperator, SqlFunctionCtx* pC
}
}
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order,
void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, int32_t scanFlag,
bool createDummyCol) {
if (pBlock->pBlockAgg != NULL) {
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
} else {
doSetInputDataBlock(pOperator, pCtx, pBlock, order, createDummyCol);
doSetInputDataBlock(pOperator, pCtx, pBlock, order, scanFlag, createDummyCol);
}
}
@ -712,14 +739,14 @@ static int32_t doCreateConstantValColumnInfo(SInputColumnInfoData* pInput, SFunc
}
static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order,
bool createDummyCol) {
int32_t scanFlag, bool createDummyCol) {
int32_t code = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pOperator->numOfExprs; ++i) {
pCtx[i].order = order;
pCtx[i].size = pBlock->info.rows;
pCtx[i].pSrcBlock = pBlock;
pCtx[i].currentStage = MAIN_SCAN;
pCtx[i].currentStage = scanFlag;
SInputColumnInfoData* pInput = &pCtx[i].input;
pInput->uid = pBlock->info.uid;
@ -735,7 +762,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
pInput->numOfRows = pBlock->info.rows;
pInput->startRowIndex = 0;
// the last parameter is the timestamp column
// NOTE: the last parameter is the primary timestamp column
if (fmIsTimelineFunc(pCtx[i].functionId) && (j == pOneExpr->base.numOfParams - 1)) {
pInput->pPTS = pInput->pData[j];
}
@ -806,7 +833,13 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunction
// this can be set when creating the struct
// todo add a dummy function to avoid the process check
if (pCtx[k].fpSet.process != NULL) {
pCtx[k].fpSet.process(&pCtx[k]);
int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s call aggregate function error happens, code : %s",
GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
pOperator->pTaskInfo->code = code;
longjmp(pOperator->pTaskInfo->env, code);
}
}
}
}
@ -873,7 +906,8 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
} else if (pExpr[k].pExpr->nodeType == QUERY_NODE_FUNCTION) {
ASSERT(!fmIsAggFunc(pfCtx->functionId));
if (fmIsPseudoColumnFunc(pfCtx->functionId)) {
// _rowts/_c0, not tbname column
if (fmIsPseudoColumnFunc(pfCtx->functionId) && (!fmIsScanPseudoColumnFunc(pfCtx->functionId))) {
// do nothing
} else if (fmIsNonstandardSQLFunc(pfCtx->functionId)) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[k]);
@ -2176,7 +2210,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p
* @param pQInfo
* @param result
*/
int32_t doCopyToSDataBlock(SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo,
int32_t orderType, int32_t* rowCellOffset, SqlFunctionCtx* pCtx) {
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
int32_t numOfResult = pBlock->info.rows; // there are already exists result rows
@ -2215,8 +2249,14 @@ int32_t doCopyToSDataBlock(SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbased
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
pCtx[j].resultInfo = getResultCell(pRow, j, rowCellOffset);
if (pCtx[j].fpSet.process) {
pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (pCtx[j].fpSet.finalize) {
int32_t code = TSDB_CODE_SUCCESS;
code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
longjmp(taskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
} else {
@ -2243,7 +2283,7 @@ int32_t doCopyToSDataBlock(SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbased
return 0;
}
void doBuildResultDatablock(SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo,
void doBuildResultDatablock(SExecTaskInfo *taskInfo, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo,
SDiskbasedBuf* pBuf) {
assert(pGroupResInfo->currentGroup <= pGroupResInfo->totalGroup);
@ -2257,7 +2297,7 @@ void doBuildResultDatablock(SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo
}
int32_t orderType = TSDB_ORDER_ASC;
doCopyToSDataBlock(pBlock, pExprInfo, pBuf, pGroupResInfo, orderType, rowCellOffset, pCtx);
doCopyToSDataBlock(taskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, orderType, rowCellOffset, pCtx);
// add condition (pBlock->info.rows >= 1) just to keep the runtime happy
blockDataUpdateTsWindow(pBlock);
@ -3489,7 +3529,7 @@ static SSDataBlock* doMerge(SOperatorInfo* pOperator) {
break;
}
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pDataBlock, TSDB_ORDER_ASC, true);
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pDataBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
// updateOutputBuf(&pInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor,
// pOperator->pRuntimeEnv, true);
doMergeImpl(pOperator, pOperator->numOfExprs, pDataBlock);
@ -3654,17 +3694,24 @@ _error:
return NULL;
}
int32_t getTableScanOrder(SOperatorInfo* pOperator) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
return TSDB_ORDER_ASC;
} else {
return getTableScanOrder(pOperator->pDownstream[0]);
}
}
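// Walk down the operator tree until a table scan (or exchange) operator is found and
// report its scan order and scan flag; fail if no scan operator exists downstream.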
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag) {
// todo add more information about exchange operation
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_EXCHANGE) {
*order = TSDB_ORDER_ASC;
*scanFlag = MAIN_SCAN;
return TSDB_CODE_SUCCESS;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = pOperator->info;
return pTableScanInfo->cond.order;
*order = pTableScanInfo->cond.order;
*scanFlag = pTableScanInfo->scanFlag;
return TSDB_CODE_SUCCESS;
} else {
if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
return TSDB_CODE_INVALID_PARA;
} else {
return getTableScanInfo(pOperator->pDownstream[0], order, scanFlag);
}
}
}
// this is a blocking operator
@ -3680,6 +3727,9 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
while (1) {
publishOperatorProfEvent(downstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
@ -3692,11 +3742,14 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
// setTagValue(pOperator, pAggInfo->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
// }
int32_t order = getTableScanOrder(pOperator);
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
// there is a scalar expression that needs to be calculated before applying the group aggregation.
if (pAggInfo->pScalarExprInfo != NULL) {
int32_t code = projectApplyFunctions(pAggInfo->pScalarExprInfo, pBlock, pBlock, pAggInfo->pScalarCtx,
code = projectApplyFunctions(pAggInfo->pScalarExprInfo, pBlock, pBlock, pAggInfo->pScalarCtx,
pAggInfo->numOfScalarExpr, NULL);
if (code != TSDB_CODE_SUCCESS) {
pTaskInfo->code = code;
@ -3706,7 +3759,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
// the pDataBlock is always the same one, no need to call this again
setExecutionContext(pOperator->numOfExprs, pBlock->info.groupId, pTaskInfo, pAggInfo);
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, true);
doAggregateImpl(pOperator, 0, pInfo->pCtx);
#if 0 // test for encode/decode result info
@ -3749,7 +3802,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
}
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(pInfo, &pAggInfo->groupResInfo, pOperator->pExpr, pAggInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, pInfo, &pAggInfo->groupResInfo, pOperator->pExpr, pAggInfo->aggSup.pResultBuf);
if (pInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pAggInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
}
@ -3987,6 +4040,9 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
}
#endif
int32_t order = 0;
int32_t scanFlag = 0;
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
@ -4018,15 +4074,14 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// }
// the pDataBlock is always the same one, no need to call this again
int32_t order = getTableScanOrder(pOperator->pDownstream[0]);
int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, false);
setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, false);
blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
pTaskInfo->code = projectApplyFunctions(pOperator->pExpr, pInfo->pRes, pBlock, pInfo->pCtx, pOperator->numOfExprs,
pProjectInfo->pPseudoColInfo);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, pTaskInfo->code);
code = projectApplyFunctions(pOperator->pExpr, pInfo->pRes, pBlock, pInfo->pCtx, pOperator->numOfExprs, pProjectInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
int32_t status = handleLimitOffset(pOperator, pBlock);
@ -4548,7 +4603,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i
return s;
}
static SColumn* createColumn(int32_t blockId, int32_t slotId, SDataType* pType) {
static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) {
SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn));
if (pCol == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -4556,6 +4611,7 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, SDataType* pType)
}
pCol->slotId = slotId;
pCol->colId = colId;
pCol->bytes = pType->bytes;
pCol->type = pType->type;
pCol->scale = pType->scale;
@ -4601,7 +4657,7 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
SDataType* pType = &pColNode->node.resType;
pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
pType->precision, pColNode->colName);
pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pType);
pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType);
pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
} else if (type == QUERY_NODE_VALUE) {
pExp->pExpr->nodeType = QUERY_NODE_VALUE;
@ -4625,8 +4681,22 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
pExp->pExpr->_function.functionId = pFuncNode->funcId;
pExp->pExpr->_function.pFunctNode = pFuncNode;
strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
tListLen(pExp->pExpr->_function.functionName));
#if 1
// todo refactor: add the parameter for tbname function
if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) {
pFuncNode->pParameterList = nodesMakeList();
ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == res) { // todo handle error
} else {
res->node.resType = (SDataType) {.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
nodesListAppend(pFuncNode->pParameterList, res);
}
}
#endif
int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
@ -4639,7 +4709,7 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
SColumnNode* pcn = (SColumnNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, &pcn->node.resType);
pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType);
} else if (p1->type == QUERY_NODE_VALUE) {
SValueNode* pvn = (SValueNode*)p1;
pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
@ -4687,45 +4757,59 @@ static int32_t doCreateTableGroup(void* metaHandle, int32_t tableType, uint64_t
uint64_t queryId, uint64_t taskId);
static SArray* extractTableIdList(const STableGroupInfo* pTableGroupInfo);
static SArray* extractColumnInfo(SNodeList* pNodeList);
static SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
int32_t type);
static SArray* createSortInfo(SNodeList* pNodeList);
static SArray* extractPartitionColInfo(SNodeList* pNodeList);
static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
static SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SInterval interval = {
.interval = pTableScanNode->interval,
.sliding = pTableScanNode->sliding,
.intervalUnit = pTableScanNode->intervalUnit,
.slidingUnit = pTableScanNode->slidingUnit,
.offset = pTableScanNode->offset,
};
return interval;
}
SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
uint64_t queryId, uint64_t taskId, STableGroupInfo* pTableGroupInfo) {
int32_t type = nodeType(pPhyNode);
if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode;
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
int32_t numOfCols = 0;
tsdbReaderT pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId);
if (pDataReader == NULL && terrno != 0) {
return NULL;
}
SOperatorInfo* pOperator = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
STableScanInfo* pScanInfo = pOperator->info;
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pExchange->node.pOutputDataBlockDesc);
return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, pExchange->pSrcEndPoints, pResBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table.
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
int32_t numOfCols = 0;
tsdbReaderT pDataReader = NULL;
if (pHandle->vnode) {
pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableGroupInfo, (uint64_t)queryId, taskId);
} else {
doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo,
queryId, taskId);
}
if (pDataReader == NULL && terrno != 0) {
qDebug("pDataReader is NULL");
// return NULL;
} else {
qDebug("pDataReader is not NULL");
}
SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
SArray* pColList = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
SSDataBlock* pResBlock = createResDataBlock(pDescNode);
SSDataBlock* pResBlockDumy = createResDataBlock(pDescNode);
SQueryTableDataCond cond = {0};
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
@ -4734,30 +4818,14 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
}
SInterval interval = extractIntervalInfo(pTableScanNode);
SOperatorInfo* pOperator = createTableScanOperatorInfo(
pDataReader, &cond, numOfCols, pTableScanNode->dataRequired, pTableScanNode->scanSeq, pColList, pResBlock,
pScanPhyNode->node.pConditions, &interval, pTableScanNode->ratio, pTaskInfo);
STableScanInfo* pScanInfo = pOperator->info;
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pPhyNode;
SSDataBlock* pResBlock = createResDataBlock(pExchange->node.pOutputDataBlockDesc);
return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, pExchange->pSrcEndPoints, pResBlock, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
SScanPhysiNode* pScanPhyNode = (SScanPhysiNode*)pPhyNode; // simple child table.
SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo);
int32_t code = doCreateTableGroup(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableGroupInfo,
queryId, taskId);
SArray* tableIdList = extractTableIdList(pTableGroupInfo);
SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
SSDataBlock* pResBlock = createResDataBlock(pDescNode);
int32_t numOfCols = 0;
SArray* pCols = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle->reader, pResBlock, pCols, tableIdList, pTaskInfo,
pScanPhyNode->node.pConditions);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle->reader, pDataReader, pResBlock, pCols, tableIdList, pTaskInfo,
pScanPhyNode->node.pConditions, pOperatorDumy, &interval);
taosArrayDestroy(tableIdList);
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
@ -4928,7 +4996,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return pOptr;
}
static int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
pCond->loadExternalRows = false;
pCond->order = pTableScanNode->scanSeq[0] > 0 ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;

View File

@ -234,7 +234,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
int32_t rowIndex = j - num;
doApplyFunctions(pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->numOfExprs, TSDB_ORDER_ASC);
doApplyFunctions(pTaskInfo, pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->numOfExprs, TSDB_ORDER_ASC);
// assign the group keys or user input constant values if required
doAssignGroupKeys(pCtx, pOperator->numOfExprs, pBlock->info.rows, rowIndex);
@ -252,7 +252,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
int32_t rowIndex = pBlock->info.rows - num;
doApplyFunctions(pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->numOfExprs, TSDB_ORDER_ASC);
doApplyFunctions(pTaskInfo, pCtx, &w, NULL, rowIndex, num, NULL, pBlock->info.rows, pOperator->numOfExprs, TSDB_ORDER_ASC);
doAssignGroupKeys(pCtx, pOperator->numOfExprs, pBlock->info.rows, rowIndex);
}
}
@ -268,7 +268,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
SSDataBlock* pRes = pInfo->binfo.pRes;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
@ -287,7 +287,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
}
// the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
// there is a scalar expression that needs to be calculated right before applying the group aggregation.
if (pInfo->pScalarExprInfo != NULL) {
@ -317,7 +317,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, false);
while(1) {
doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes);
bool hasRemain = hasRemainDataInCurrentGroup(&pInfo->groupResInfo);

View File

@ -13,9 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <libs/function/function.h>
#include "filter.h"
#include "function.h"
#include "filter.h"
#include "functionMgt.h"
#include "os.h"
#include "querynodes.h"
@ -284,6 +283,27 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
continue;
}
// currently only the tbname pseudo column
if (pTableScanInfo->numOfPseudoExpr > 0) {
int32_t dstSlotId = pTableScanInfo->pPseudoExpr->base.resSchema.slotId;
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows);
struct SScalarFuncExecFuncs fpSet;
fmGetScalarFuncExecFuncs(pTableScanInfo->pPseudoExpr->pExpr->_function.functionId, &fpSet);
SColumnInfoData infoData = {0};
infoData.info.type = TSDB_DATA_TYPE_BIGINT;
infoData.info.bytes = sizeof(uint64_t);
colInfoDataEnsureCapacity(&infoData, 0, 1);
colDataAppendInt64(&infoData, 0, &pBlock->info.uid);
SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pTableScanInfo->readHandle.meta, .columnData = &infoData};
SScalarParam param = {.columnData = pColInfoData};
fpSet.process(&srcParam, 1, &param);
}
return pBlock;
}
@ -314,8 +334,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
STimeWindow* pWin = &pTableScanInfo->cond.twindow;
qDebug("%s start to repeat ascending order scan data blocks due to query func required, qrange:%" PRId64
"-%" PRId64,
GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
"-%" PRId64, GET_TASKID(pTaskInfo), pWin->skey, pWin->ekey);
// prepare for the next round of table scan
tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
@ -359,10 +378,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
return NULL;
}
SOperatorInfo* createTableScanOperatorInfo(void* pDataReader, SQueryTableDataCond* pCond, int32_t numOfOutput,
int32_t dataLoadFlag, const uint8_t* scanInfo, SArray* pColMatchInfo,
SSDataBlock* pResBlock, SNode* pCondition, SInterval* pInterval,
double sampleRatio, SExecTaskInfo* pTaskInfo) {
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SInterval interval = {
.interval = pTableScanNode->interval,
.sliding = pTableScanNode->sliding,
.intervalUnit = pTableScanNode->intervalUnit,
.slidingUnit = pTableScanNode->slidingUnit,
.offset = pTableScanNode->offset,
};
return interval;
}
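// Release the resources owned by the table scan operator: the result block, the tsdb
// read handle and the column-match info array.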
static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
taosMemoryFree(pTableScanInfo->pResBlock);
tsdbCleanupReadHandle(pTableScanInfo->dataReader);
if (pTableScanInfo->pColMatchInfo != NULL) {
taosArrayDestroy(pTableScanInfo->pColMatchInfo);
}
}
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, tsdbReaderT pDataReader, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -373,27 +411,42 @@ SOperatorInfo* createTableScanOperatorInfo(void* pDataReader, SQueryTableDataCon
return NULL;
}
pInfo->cond = *pCond;
pInfo->scanInfo = (SScanInfo){.numOfAsc = scanInfo[0], .numOfDesc = scanInfo[1]};
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
pInfo->interval = *pInterval;
pInfo->sampleRatio = sampleRatio;
pInfo->dataBlockLoadFlag = dataLoadFlag;
pInfo->pResBlock = pResBlock;
pInfo->pFilterNode = pCondition;
int32_t numOfCols = 0;
SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
if (pTableScanNode->scan.pScanPseudoCols != NULL) {
pInfo->pPseudoExpr = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pInfo->numOfPseudoExpr);
pInfo->pPseudoCtx = createSqlFunctionCtx(pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, &pInfo->rowCellInfoOffset);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
pInfo->readHandle = *readHandle;
pInfo->interval = extractIntervalInfo(pTableScanNode);
pInfo->sampleRatio = pTableScanNode->ratio;
pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
pInfo->pResBlock = createResDataBlock(pDescNode);
pInfo->pFilterNode = pTableScanNode->scan.node.pConditions;
pInfo->dataReader = pDataReader;
pInfo->scanFlag = MAIN_SCAN;
pInfo->pColMatchInfo = pColMatchInfo;
pInfo->pColMatchInfo = pColList;
pOperator->name = "TableScanOperator"; // for dubug purpose
pOperator->name = "TableScanOperator"; // for debug purpose
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->numOfExprs = numOfOutput;
pOperator->numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, NULL, NULL, NULL, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, NULL, destroyTableScanOperatorInfo, NULL, NULL, NULL);
static int32_t cost = 0;
@ -515,7 +568,40 @@ static void doClearBufferedBlocks(SStreamBlockScanInfo* pInfo) {
taosArrayClear(pInfo->pBlockLists);
}
static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo) {
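// Position the tsdb reader on the time window that covers the next timestamp in
// pUpdateRes so the affected window can be rescanned; returns false once all
// updated timestamps have been consumed.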
static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
SSDataBlock* pSDB = pInfo->pUpdateRes;
if (pInfo->updateResIndex < pSDB->info.rows) {
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, 0);
TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval,
pInfo->interval.precision, NULL);
STableScanInfo* pTableScanInfo = pInfo->pOperatorDumy->info;
pTableScanInfo->cond.twindow = win;
tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond);
pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex,
win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
pTableScanInfo->scanTimes = 0;
return true;
} else {
return false;
}
}
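// Read one block via the dummy table scan operator; when the current window is
// exhausted, move to the next window that still has updated data and scan again.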
static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo) {
SSDataBlock* pResult = NULL;
pResult = doTableScan(pInfo->pOperatorDumy);
if (pResult == NULL) {
if (prepareDataScan(pInfo)) {
// scan next window data
pResult = doTableScan(pInfo->pOperatorDumy);
}
}
return pResult;
}
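// Collect the timestamps of updated rows into tsArray; when there are updates and the
// query is invertible, wrap them into a STREAM_REPROCESS block that triggers window
// clearing and rescan.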
static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible) {
SColumnInfoData* pColDataInfo = taosArrayGet(pInfo->pRes->pDataBlock, pInfo->primaryTsIndex);
TSKEY* ts = (TSKEY*)pColDataInfo->pData;
for (int32_t i = 0; i < pInfo->pRes->info.rows; i++) {
@ -523,13 +609,19 @@ static SSDataBlock* getUpdateDataBlock(SStreamBlockScanInfo* pInfo) {
taosArrayPush(pInfo->tsArray, ts + i);
}
}
if (taosArrayGetSize(pInfo->tsArray) > 0) {
int32_t size = taosArrayGetSize(pInfo->tsArray);
if (size > 0 && invertible) {
// TODO(liuyao) get from tsdb
// SSDataBlock* p = createOneDataBlock(pInfo->pRes, true);
// p->info.type = STREAM_INVERT;
// taosArrayClear(pInfo->tsArray);
// return p;
return NULL;
SSDataBlock* p = createOneDataBlock(pInfo->pRes, false);
taosArraySet(p->pDataBlock, 0, pInfo->tsArray);
p->info.rows = size;
p->info.type = STREAM_REPROCESS;
taosArrayClear(pInfo->tsArray);
return p;
}
return NULL;
}
@ -556,14 +648,23 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
int32_t current = pInfo->validBlockIndex++;
return taosArrayGetP(pInfo->pBlockLists, current);
} else {
if (total > 0) {
ASSERT(total == 2);
SSDataBlock* pRes = taosArrayGetP(pInfo->pBlockLists, 0);
SSDataBlock* pUpRes = taosArrayGetP(pInfo->pBlockLists, 1);
blockDataDestroy(pUpRes);
taosArrayClear(pInfo->pBlockLists);
return pRes;
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
return pInfo->pRes;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) {
blockDataCleanup(pInfo->pRes);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
return pInfo->pUpdateRes;
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
SSDataBlock* pSDB = doDataScan(pInfo);
if (pSDB == NULL) {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} else {
return pSDB;
}
}
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
blockDataCleanup(pInfo->pRes);
@ -629,21 +730,29 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
if (rows == 0) {
pOperator->status = OP_EXEC_DONE;
} else {
SSDataBlock* upRes = getUpdateDataBlock(pInfo);
} else if (pInfo->interval.interval > 0) {
SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); //TODO(liuyao) get invertible from plan
if (upRes) {
taosArrayPush(pInfo->pBlockLists, &(pInfo->pRes));
taosArrayPush(pInfo->pBlockLists, &upRes);
pInfo->pUpdateRes = upRes;
if (upRes->info.type == STREAM_REPROCESS) {
pInfo->updateResIndex = 0;
prepareDataScan(pInfo);
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (upRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return upRes;
}
}
}
return (rows == 0) ? NULL : pInfo->pRes;
}
}
SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock* pResBlock, SArray* pColList,
SArray* pTableIdList, SExecTaskInfo* pTaskInfo, SNode* pCondition) {
SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader,
SSDataBlock* pResBlock, SArray* pColList, SArray* pTableIdList,
SExecTaskInfo* pTaskInfo, SNode* pCondition, SOperatorInfo* pOperatorDumy,
SInterval* pInterval) {
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -683,7 +792,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock*
}
pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan
pInfo->pUpdateInfo = updateInfoInit(60000, 0, 100); // TODO(liuyao) get it from physical plan
pInfo->pUpdateInfo = updateInfoInitP(pInterval, 10000); // TODO(liuyao) get watermark from physical plan
if (pInfo->pUpdateInfo == NULL) {
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
@ -693,6 +802,10 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, SSDataBlock*
pInfo->readerHandle = streamReadHandle;
pInfo->pRes = pResBlock;
pInfo->pCondition = pCondition;
pInfo->pDataReader = pDataReader;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
pInfo->pOperatorDumy = pOperatorDumy;
pInfo->interval = *pInterval;
pOperator->name = "StreamBlockScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@ -1295,36 +1408,33 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
char str[512] = {0};
int32_t count = 0;
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
while (pInfo->curPos < pInfo->pTableGroups->numOfTables && count < pOperator->resultInfo.capacity) {
STableKeyInfo* item = taosArrayGet(pa, pInfo->curPos);
metaGetTableEntryByUid(&mr, item->uid);
for (int32_t j = 0; j < pOperator->numOfExprs; ++j) {
SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
// refactor later
if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) {
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
metaGetTableEntryByUid(&mr, item->uid);
STR_TO_VARSTR(str, mr.me.name);
metaReaderClear(&mr);
colDataAppend(pDst, count, str, false);
// data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.pColumns->info.colId, type, bytes);
// dst = pColInfo->pData + count * pExprInfo[j].base.resSchema.bytes;
// doSetTagValueToResultBuf(dst, data, type, bytes);
} else { // it is a tag value
const char* p = metaGetTableTagVal(&mr.me, pExprInfo[j].base.pParam[0].pCol->colId);
colDataAppend(pDst, count, p, (p == NULL));
}
}
count += 1;
}
if (++pInfo->curPos >= pInfo->pTableGroups->numOfTables) {
pOperator->status = OP_EXEC_DONE;
}
}
metaReaderClear(&mr);
// qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
if (pOperator->status == OP_EXEC_DONE) {
setTaskStatus(pTaskInfo, TASK_COMPLETED);

View File

@ -82,7 +82,7 @@ static void getInitialStartTimeWindow(SInterval* pInterval, int32_t precision, T
}
// get the correct time window according to the handled timestamp
static STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts,
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts,
SInterval* pInterval, int32_t precision, STimeWindow* win) {
STimeWindow w = {0};
@ -186,7 +186,7 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se
return forwardStep;
}
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
int32_t midPos = -1;
int32_t numOfRows;
@ -249,7 +249,7 @@ static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) {
return midPos;
}
static int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos,
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos,
TSKEY ekey, __block_search_fn_t searchFn, STableQueryInfo* item,
int32_t order) {
assert(startPos >= 0 && startPos < pDataBlockInfo->rows);
@ -703,7 +703,7 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
pInfo->order, false);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
doApplyFunctions(pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
STimeWindow nextWin = win;
@ -740,7 +740,7 @@ static SArray* hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
pInfo->order, false);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
doApplyFunctions(pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardStep, tsCols,
pSDataBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
}
@ -775,7 +775,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
// setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
STableQueryInfo* pTableQueryInfo = pInfo->pCurrent;
setIntervalQueryRange(pTableQueryInfo, pBlock->info.window.skey, &pTaskInfo->window);
@ -855,7 +855,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false);
doApplyFunctions(pInfo->binfo.pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
// here we start a new session window
@ -874,7 +874,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
doApplyFunctions(pInfo->binfo.pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
}
@ -888,7 +888,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
SOptrBasicInfo* pBInfo = &pInfo->binfo;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
return NULL;
@ -910,7 +910,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
break;
}
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, order, MAIN_SCAN, true);
doStateWindowAggImpl(pOperator, pInfo, pBlock);
}
@ -921,7 +921,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, true);
blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
}
@ -948,7 +948,7 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
}
blockDataEnsureCapacity(pBlock, pOperator->resultInfo.capacity);
doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pBlock->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
@ -988,6 +988,20 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
}
}
}
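// For each timestamp in a STREAM_REPROCESS block, locate the time window it falls into
// and clear that window's intermediate results before the data is re-applied.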
static void doClearWindows(SIntervalAggOperatorInfo* pInfo, int32_t numOfOutput, SSDataBlock* pBlock) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
TSKEY *tsCols = (TSKEY*)pColDataInfo->pData;
int32_t step = 0;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], &pInfo->interval,
pInfo->interval.precision, NULL);
step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i,
win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
doClearWindow(pInfo, (char*)&win.skey, sizeof(TSKEY), pBlock->info.groupId, numOfOutput);
}
}
static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SIntervalAggOperatorInfo* pInfo = pOperator->info;
@ -998,7 +1012,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pOperator->pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
}
@ -1024,10 +1038,14 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
// setTagValue(pOperator, pRuntimeEnv->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
if (pInfo->invertible) {
setInverFunction(pInfo->binfo.pCtx, pOperator->numOfExprs, pBlock->info.type);
}
if (pBlock->info.type == STREAM_REPROCESS) {
doClearWindows(pInfo, pOperator->numOfExprs, pBlock);
continue;
}
pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0);
}
@ -1035,7 +1053,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(&pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pOperator->pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
// TODO: remove for stream
/*ASSERT(pInfo->binfo.pRes->info.rows > 0);*/
@ -1233,7 +1251,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
// pInfo->numOfRows data belong to the current session window
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false);
doApplyFunctions(pInfo->binfo.pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &window, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
// here we start a new session window
@ -1252,7 +1270,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
doApplyFunctions(pInfo->binfo.pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
doApplyFunctions(pTaskInfo, pInfo->binfo.pCtx, &pRowSup->win, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
pRowSup->numOfRows, NULL, pBlock->info.rows, numOfOutput, TSDB_ORDER_ASC);
}
@ -1265,7 +1283,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
SOptrBasicInfo* pBInfo = &pInfo->binfo;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildResultDatablock(pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pOperator->pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
return NULL;
@ -1286,7 +1304,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, order, MAIN_SCAN, true);
doSessionWindowAggImpl(pOperator, pInfo, pBlock);
}
@ -1298,7 +1316,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, true);
blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
doBuildResultDatablock(pOperator->pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf);
if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
}
@ -1334,7 +1352,7 @@ static SSDataBlock* doAllIntervalAgg(SOperatorInfo* pOperator) {
// setTagValue(pOperator, pRuntimeEnv->current->pTable, pIntervalInfo->pCtx, pOperator->numOfExprs);
// the pDataBlock is always the same one, no need to call this again
setInputDataBlock(pOperator, pSliceInfo->binfo.pCtx, pBlock, order, true);
setInputDataBlock(pOperator, pSliceInfo->binfo.pCtx, pBlock, order, MAIN_SCAN, true);
// hashAllIntervalAgg(pOperator, &pSliceInfo->binfo.resultRowInfo, pBlock, 0);
}

View File

@ -95,6 +95,13 @@ bool stateFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t stateCountFunction(SqlFunctionCtx* pCtx);
int32_t stateDurationFunction(SqlFunctionCtx* pCtx);
bool getCsumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t csumFunction(SqlFunctionCtx* pCtx);
bool getMavgFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool mavgFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t mavgFunction(SqlFunctionCtx* pCtx);
bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
#ifdef __cplusplus

View File

@ -308,6 +308,58 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32
return TSDB_CODE_SUCCESS;
}
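// csum accepts a single numeric column; the result type is widened to BIGINT,
// UBIGINT or DOUBLE depending on the input column type.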
static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"The input parameter of CSUM function can only be column");
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t resType;
if (!IS_NUMERIC_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
} else {
if (IS_SIGNED_NUMERIC_TYPE(colType)) {
resType = TSDB_DATA_TYPE_BIGINT;
} else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
resType = TSDB_DATA_TYPE_UBIGINT;
} else if (IS_FLOAT_TYPE(colType)) {
resType = TSDB_DATA_TYPE_DOUBLE;
} else {
ASSERT(0);
}
}
pFunc->node.resType = (SDataType) { .bytes = tDataTypes[resType].bytes, .type = resType};
return TSDB_CODE_SUCCESS;
}
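// mavg takes a numeric column and an integer window size; the result is always DOUBLE.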
static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (2 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"The input parameter of MAVG function can only be column");
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
return TSDB_CODE_SUCCESS;
}
static int32_t translateLastRow(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// todo
return TSDB_CODE_SUCCESS;
@ -742,6 +794,26 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.processFunc = stateDurationFunction,
.finalizeFunc = NULL
},
{
.name = "csum",
.type = FUNCTION_TYPE_CSUM,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateCsum,
.getEnvFunc = getCsumFuncEnv,
.initFunc = functionSetup,
.processFunc = csumFunction,
.finalizeFunc = NULL
},
{
.name = "mavg",
.type = FUNCTION_TYPE_MAVG,
.classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateMavg,
.getEnvFunc = getMavgFuncEnv,
.initFunc = mavgFunctionSetup,
.processFunc = mavgFunction,
.finalizeFunc = NULL
},
{
.name = "abs",
.type = FUNCTION_TYPE_ABS,

View File

@ -21,7 +21,8 @@
#include "tdatablock.h"
#include "tpercentile.h"
#define HISTOGRAM_MAX_BINS_NUM 100
#define HISTOGRAM_MAX_BINS_NUM 1000
#define MAVG_MAX_POINTS_NUM 1000
typedef struct SSumRes {
union {
@ -141,6 +142,14 @@ typedef enum {
STATE_OPER_EQ,
} EStateOperType;
typedef struct SMavgInfo {
int32_t pos;
double sum;
int32_t numOfPoints;
bool pointsMeet;
double points[];
} SMavgInfo;
#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
@ -1644,7 +1653,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
pResInfo->complete = true;
return 0;
} else {
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
pInfo->pMemBucket = tMemBucketCreate(pCol->info.bytes, type, pInfo->minval, pInfo->maxval);
}
}
@ -1695,10 +1704,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
pInfo->numOfElems += 1;
}
}
return 0;
}
} else {
// the second stage, calculate the true percentile value
int32_t start = pInput->startRowIndex;
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
@ -1707,18 +1713,19 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
}
char* data = colDataGetData(pCol, i);
notNullElems += 1;
tMemBucketPut(pInfo->pMemBucket, data, 1);
}
SET_VAL(pResInfo, notNullElems, 1);
}
return TSDB_CODE_SUCCESS;
}
int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
double v = pVal->nType == TSDB_DATA_TYPE_INT ? pVal->i : pVal->d;
double v = (pVal->nType == TSDB_DATA_TYPE_BIGINT) ? pVal->i : pVal->d;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);
@ -2818,7 +2825,6 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
int32_t numOfElems = 0;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
@ -2856,7 +2862,6 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
int32_t numOfElems = 0;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
@ -2896,3 +2901,136 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
return numOfElems;
}
bool getCsumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SSumRes);
return true;
}
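// Cumulative sum: keep a running total in SSumRes and emit one output row per
// non-null input row, using the accumulator that matches the input type.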
int32_t csumFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SSumRes* pSumRes = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t numOfElems = 0;
int32_t type = pInputCol->info.type;
int32_t startOffset = pCtx->offset;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t pos = startOffset + numOfElems;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
//colDataAppendNULL(pOutput, i);
continue;
}
char* data = colDataGetData(pInputCol, i);
if (IS_SIGNED_NUMERIC_TYPE(type)) {
int64_t v;
GET_TYPED_DATA(v, int64_t, type, data);
pSumRes->isum += v;
colDataAppend(pOutput, pos, (char *)&pSumRes->isum, false);
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t v;
GET_TYPED_DATA(v, uint64_t, type, data);
pSumRes->usum += v;
colDataAppend(pOutput, pos, (char *)&pSumRes->usum, false);
} else if (IS_FLOAT_TYPE(type)) {
double v;
GET_TYPED_DATA(v, double, type, data);
pSumRes->dsum += v;
colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false);
}
//TODO: remove this after pTsOutput is handled
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
}
return numOfElems;
}
bool getMavgFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SMavgInfo) + MAVG_MAX_POINTS_NUM * sizeof(double);
return true;
}
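// Validate the window size parameter (1..MAVG_MAX_POINTS_NUM) and reset the
// circular buffer state before processing.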
bool mavgFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo *pResultInfo) {
if (!functionSetup(pCtx, pResultInfo)) {
return false;
}
SMavgInfo *pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
pInfo->pos = 0;
pInfo->sum = 0;
pInfo->numOfPoints = pCtx->param[1].param.i;
if (pInfo->numOfPoints < 1 || pInfo->numOfPoints > MAVG_MAX_POINTS_NUM) {
return false;
}
pInfo->pointsMeet = false;
return true;
}
int32_t mavgFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SMavgInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
int32_t numOfElems = 0;
int32_t type = pInputCol->info.type;
int32_t startOffset = pCtx->offset;
for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
int32_t pos = startOffset + numOfElems;
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
//colDataAppendNULL(pOutput, i);
continue;
}
char* data = colDataGetData(pInputCol, i);
double v;
GET_TYPED_DATA(v, double, type, data);
if (!pInfo->pointsMeet && (pInfo->pos < pInfo->numOfPoints - 1)) {
pInfo->points[pInfo->pos] = v;
pInfo->sum += v;
} else {
if (!pInfo->pointsMeet && (pInfo->pos == pInfo->numOfPoints - 1)) {
pInfo->sum += v;
pInfo->pointsMeet = true;
} else {
pInfo->sum = pInfo->sum + v - pInfo->points[pInfo->pos];
}
pInfo->points[pInfo->pos] = v;
double result = pInfo->sum / pInfo->numOfPoints;
colDataAppend(pOutput, pos, (char *)&result, false);
//TODO: remove this after pTsOutput is handled
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
}
pInfo->pos++;
if (pInfo->pos == pInfo->numOfPoints) {
pInfo->pos = 0;
}
}
return numOfElems;
}
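mavgFunction above keeps a ring buffer of the last numOfPoints values, so each new average is an O(1) update (sum = sum + v - oldest) instead of a rescan of the window. A minimal sketch of that ring-buffer arithmetic, assuming a plain double array and a fixed window of 3:
#include <stdio.h>
#define WINDOW 3
int main(void) {
  double in[] = {1, 2, 3, 4, 5, 6};
  double points[WINDOW] = {0};
  double sum = 0;
  int    pos = 0, filled = 0;
  for (int i = 0; i < 6; ++i) {
    if (filled < WINDOW) {              /* still filling the window */
      sum += in[i];
      points[pos] = in[i];
      if (++filled == WINDOW) printf("mavg = %.1f\n", sum / WINDOW);
    } else {                            /* slide: drop oldest, add newest */
      sum = sum + in[i] - points[pos];
      points[pos] = in[i];
      printf("mavg = %.1f\n", sum / WINDOW);
    }
    pos = (pos + 1) % WINDOW;
  }
  return 0;                             /* prints 2.0 3.0 4.0 5.0 */
}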

View File

@ -27,19 +27,6 @@
#include "tvariant.h"
#include "tdef.h"
//static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) {
// if (pLeft->nodeType == TEXPR_COL_NODE) {
// // if left node is the primary column,return true
// return (strcmp(primaryColumnName, pLeft->pSchema->name) == 0) ? 1 : 0;
// } else {
// // if any children have query on primary key, their parents are also keep this value
// return ((pLeft->nodeType == TEXPR_BINARYEXPR_NODE && pLeft->_node.hasPK == 1) ||
// (pRight->nodeType == TEXPR_BINARYEXPR_NODE && pRight->_node.hasPK == 1)) == true
// ? 1
// : 0;
// }
//}
static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *));
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
@ -64,21 +51,7 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
}
int32_t type = (*pExpr)->nodeType;
if (type == TEXPR_BINARYEXPR_NODE) {
doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
doExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
if (fp != NULL) {
fp((*pExpr)->_node.info);
}
} else if (type == TEXPR_UNARYEXPR_NODE) {
doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
if (fp != NULL) {
fp((*pExpr)->_node.info);
}
assert((*pExpr)->_node.pRight == NULL);
} else if (type == TEXPR_VALUE_NODE) {
if (type == TEXPR_VALUE_NODE) {
taosVariantDestroy((*pExpr)->pVal);
taosMemoryFree((*pExpr)->pVal);
} else if (type == TEXPR_COL_NODE) {
@ -90,9 +63,7 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
}
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) {
tExprNode *pLeft = pExpr->_node.pLeft;
tExprNode *pRight = pExpr->_node.pRight;
#if 0
//non-leaf nodes, recursively traverse the expression tree in the post-root order
if (pLeft->nodeType == TEXPR_BINARYEXPR_NODE && pRight->nodeType == TEXPR_BINARYEXPR_NODE) {
if (pExpr->_node.optr == LOGIC_COND_TYPE_OR) { // or
@ -114,6 +85,9 @@ bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp
// handle the leaf node
param->setupInfoFn(pExpr, param->pExtInfo);
return param->nodeFilterFn(pItem, pExpr->_node.info);
#endif
return 0;
}
// TODO: these three functions should be made global
@ -141,59 +115,6 @@ static UNUSED_FUNC char* exception_strdup(const char* str) {
return p;
}
static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
int32_t anchor = CLEANUP_GET_ANCHOR();
if (CLEANUP_EXCEED_LIMIT()) {
THROW(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT);
return NULL;
}
tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, pExpr, NULL);
pExpr->nodeType = tbufReadUint8(br);
if (pExpr->nodeType == TEXPR_VALUE_NODE) {
SVariant* pVal = exception_calloc(1, sizeof(SVariant));
pExpr->pVal = pVal;
pVal->nType = tbufReadUint32(br);
if (pVal->nType == TSDB_DATA_TYPE_BINARY) {
tbufReadToBuffer(br, &pVal->nLen, sizeof(pVal->nLen));
pVal->pz = taosMemoryCalloc(1, pVal->nLen + 1);
tbufReadToBuffer(br, pVal->pz, pVal->nLen);
} else {
pVal->i = tbufReadInt64(br);
}
} else if (pExpr->nodeType == TEXPR_COL_NODE) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
pExpr->pSchema = pSchema;
pSchema->colId = tbufReadInt16(br);
pSchema->bytes = tbufReadInt16(br);
pSchema->type = tbufReadUint8(br);
tbufReadToString(br, pSchema->name, TSDB_COL_NAME_LEN);
} else if (pExpr->nodeType == TEXPR_BINARYEXPR_NODE) {
pExpr->_node.optr = tbufReadUint8(br);
pExpr->_node.pLeft = exprTreeFromBinaryImpl(br);
pExpr->_node.pRight = exprTreeFromBinaryImpl(br);
assert(pExpr->_node.pLeft != NULL && pExpr->_node.pRight != NULL);
}
CLEANUP_EXECUTE_TO(anchor, false);
return pExpr;
}
tExprNode* exprTreeFromBinary(const void* data, size_t size) {
if (size == 0) {
return NULL;
}
SBufferReader br = tbufInitReader(data, size, false);
return exprTreeFromBinaryImpl(&br);
}
void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t type = tbufReadUint32(&br);
@ -405,38 +326,3 @@ err_ret:
taosHashCleanup(pObj);
taosMemoryFreeClear(tmp);
}
tExprNode* exprdup(tExprNode* pNode) {
if (pNode == NULL) {
return NULL;
}
tExprNode* pCloned = taosMemoryCalloc(1, sizeof(tExprNode));
if (pNode->nodeType == TEXPR_BINARYEXPR_NODE) {
tExprNode* pLeft = exprdup(pNode->_node.pLeft);
tExprNode* pRight = exprdup(pNode->_node.pRight);
pCloned->_node.pLeft = pLeft;
pCloned->_node.pRight = pRight;
pCloned->_node.optr = pNode->_node.optr;
} else if (pNode->nodeType == TEXPR_VALUE_NODE) {
pCloned->pVal = taosMemoryCalloc(1, sizeof(SVariant));
taosVariantAssign(pCloned->pVal, pNode->pVal);
} else if (pNode->nodeType == TEXPR_COL_NODE) {
pCloned->pSchema = taosMemoryCalloc(1, sizeof(SSchema));
*pCloned->pSchema = *pNode->pSchema;
} else if (pNode->nodeType == TEXPR_FUNCTION_NODE) {
strcpy(pCloned->_function.functionName, pNode->_function.functionName);
int32_t num = pNode->_function.num;
pCloned->_function.num = num;
pCloned->_function.pChild = taosMemoryCalloc(num, POINTER_BYTES);
for(int32_t i = 0; i < num; ++i) {
pCloned->_function.pChild[i] = exprdup(pNode->_function.pChild[i]);
}
}
pCloned->nodeType = pNode->nodeType;
return pCloned;
}

View File

@ -695,6 +695,7 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
udfCol->colMeta.scale = col->info.scale;
udfCol->colMeta.precision = col->info.precision;
udfCol->colData.numOfRows = udfBlock->numOfRows;
udfCol->hasNull = col->hasNull;
if (IS_VAR_DATA_TYPE(udfCol->colMeta.type)) {
udfCol->colData.varLenCol.varOffsetsLen = sizeof(int32_t) * udfBlock->numOfRows;
udfCol->colData.varLenCol.varOffsets = taosMemoryMalloc(udfCol->colData.varLenCol.varOffsetsLen);
@ -731,6 +732,7 @@ int32_t convertUdfColumnToDataBlock(SUdfColumn *udfCol, SSDataBlock *block) {
col->info.bytes = meta->bytes;
col->info.scale = meta->scale;
col->info.type = meta->type;
col->hasNull = udfCol->hasNull;
SUdfColumnData *data = &udfCol->colData;
if (!IS_VAR_DATA_TYPE(meta->type)) {
@ -929,7 +931,7 @@ void udfcUvHandleError(SClientUvConn *conn) {
while (!QUEUE_EMPTY(&conn->taskQueue)) {
QUEUE* h = QUEUE_HEAD(&conn->taskQueue);
SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, connTaskQueue);
task->errCode = UDFC_CODE_PIPE_READ_ERR;
task->errCode = TSDB_CODE_UDF_PIPE_READ_ERR;
QUEUE_REMOVE(&task->connTaskQueue);
QUEUE_REMOVE(&task->procTaskQueue);
uv_sem_post(&task->taskSem);
@ -1117,7 +1119,7 @@ void cleanUpUvTasks(SUdfdProxy *udfc) {
QUEUE_REMOVE(h);
SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, recvTaskQueue);
if (udfc->gUdfcState == UDFC_STATE_STOPPING) {
task->errCode = UDFC_CODE_STOPPING;
task->errCode = TSDB_CODE_UDF_STOPPING;
}
uv_sem_post(&task->taskSem);
}
@ -1127,7 +1129,7 @@ void cleanUpUvTasks(SUdfdProxy *udfc) {
QUEUE_REMOVE(h);
SClientUvTaskNode *task = QUEUE_DATA(h, SClientUvTaskNode, procTaskQueue);
if (udfc->gUdfcState == UDFC_STATE_STOPPING) {
task->errCode = UDFC_CODE_STOPPING;
task->errCode = TSDB_CODE_UDF_STOPPING;
}
uv_sem_post(&task->taskSem);
}
@ -1211,7 +1213,7 @@ int32_t udfcRunUdfUvTask(SClientUdfTask *task, int8_t uvTaskType) {
int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
fnInfo("udfc setup udf. udfName: %s", udfName);
if (gUdfdProxy.gUdfcState != UDFC_STATE_READY) {
return UDFC_CODE_INVALID_STATE;
return TSDB_CODE_UDF_INVALID_STATE;
}
SClientUdfTask *task = taosMemoryCalloc(1,sizeof(SClientUdfTask));
task->errCode = 0;
@ -1225,7 +1227,7 @@ int32_t setupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
int32_t errCode = udfcRunUdfUvTask(task, UV_TASK_CONNECT);
if (errCode != 0) {
fnError("failed to connect to pipe. udfName: %s, pipe: %s", udfName, (&gUdfdProxy)->udfdPipeName);
return UDFC_CODE_CONNECT_PIPE_ERR;
return TSDB_CODE_UDF_PIPE_CONNECT_ERR;
}
udfcRunUdfUvTask(task, UV_TASK_REQ_RSP);
@ -1252,7 +1254,7 @@ int32_t callUdf(UdfcFuncHandle handle, int8_t callType, SSDataBlock *input, SUdf
SClientUdfUvSession *session = (SClientUdfUvSession *) handle;
if (session->udfUvPipe == NULL) {
fnError("No pipe to udfd");
return UDFC_CODE_NO_PIPE;
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
task->errCode = 0;
@ -1372,7 +1374,7 @@ int32_t teardownUdf(UdfcFuncHandle handle) {
SClientUdfUvSession *session = (SClientUdfUvSession *) handle;
if (session->udfUvPipe == NULL) {
fnError("pipe to udfd does not exist");
return UDFC_CODE_NO_PIPE;
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
SClientUdfTask *task = taosMemoryCalloc(1, sizeof(SClientUdfTask));
@ -1495,7 +1497,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
taosArrayDestroy(tempBlock.pDataBlock);
taosMemoryFree(newState.buf);
return TSDB_CODE_SUCCESS;
return udfCode;
}
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {

View File

@ -102,7 +102,7 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
int err = uv_dlopen(udf->path, &udf->lib);
if (err != 0) {
fnError("can not load library %s. error: %s", udf->path, uv_strerror(err));
return UDFC_CODE_LOAD_UDF_FAILURE;
return TSDB_CODE_UDF_LOAD_UDF_FAILURE;
}
char initFuncName[TSDB_FUNC_NAME_LEN+5] = {0};
@ -140,20 +140,14 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
return 0;
}
void udfdProcessRequest(uv_work_t *req) {
SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data);
SUdfRequest request = {0};
decodeUdfRequest(uvUdf->input.base, &request);
switch (request.type) {
case UDF_TASK_SETUP: {
void udfdProcessSetupRequest(SUvUdfWork* uvUdf, SUdfRequest* request) {
// TODO: traceable id from client. connect, setup, call, teardown
fnInfo("%" PRId64 " setup request. udf name: %s", request.seqNum, request.setup.udfName);
SUdfSetupRequest *setup = &request.setup;
fnInfo("%" PRId64 " setup request. udf name: %s", request->seqNum, request->setup.udfName);
SUdfSetupRequest *setup = &request->setup;
int32_t code = TSDB_CODE_SUCCESS;
SUdf *udf = NULL;
uv_mutex_lock(&global.udfsMutex);
SUdf **udfInHash = taosHashGet(global.udfsHash, request.setup.udfName, strlen(request.setup.udfName));
SUdf **udfInHash = taosHashGet(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName));
if (udfInHash) {
++(*udfInHash)->refCount;
udf = *udfInHash;
@ -166,14 +160,14 @@ void udfdProcessRequest(uv_work_t *req) {
uv_mutex_init(&udfNew->lock);
uv_cond_init(&udfNew->condReady);
udf = udfNew;
taosHashPut(global.udfsHash, request.setup.udfName, strlen(request.setup.udfName), &udfNew, sizeof(&udfNew));
taosHashPut(global.udfsHash, request->setup.udfName, strlen(request->setup.udfName), &udfNew, sizeof(&udfNew));
uv_mutex_unlock(&global.udfsMutex);
}
uv_mutex_lock(&udf->lock);
if (udf->state == UDF_STATE_INIT) {
udf->state = UDF_STATE_LOADING;
udfdLoadUdf(setup->udfName, udf);
code = udfdLoadUdf(setup->udfName, udf);
if (udf->initFunc) {
udf->initFunc();
}
@ -188,14 +182,16 @@ void udfdProcessRequest(uv_work_t *req) {
}
SUdfcFuncHandle *handle = taosMemoryMalloc(sizeof(SUdfcFuncHandle));
handle->udf = udf;
SUdfResponse rsp;
rsp.seqNum = request.seqNum;
rsp.type = request.type;
rsp.code = 0;
rsp.seqNum = request->seqNum;
rsp.type = request->type;
rsp.code = code;
rsp.setupRsp.udfHandle = (int64_t)(handle);
rsp.setupRsp.outputType = udf->outputType;
rsp.setupRsp.outputLen = udf->outputLen;
rsp.setupRsp.bufSize = udf->bufSize;
int32_t len = encodeUdfResponse(NULL, &rsp);
rsp.msgLen = len;
void *bufBegin = taosMemoryMalloc(len);
@ -205,12 +201,12 @@ void udfdProcessRequest(uv_work_t *req) {
uvUdf->output = uv_buf_init(bufBegin, len);
taosMemoryFree(uvUdf->input.base);
break;
return;
}
case UDF_TASK_CALL: {
SUdfCallRequest *call = &request.call;
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request.seqNum, call->callType,
void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
SUdfCallRequest *call = &request->call;
fnDebug("%" PRId64 "call request. call type %d, handle: %" PRIx64, request->seqNum, call->callType,
call->udfHandle);
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(call->udfHandle);
SUdf *udf = handle->udf;
@ -218,13 +214,14 @@ void udfdProcessRequest(uv_work_t *req) {
SUdfResponse *rsp = &response;
SUdfCallResponse *subRsp = &rsp->callRsp;
int32_t code = TSDB_CODE_SUCCESS;
switch(call->callType) {
case TSDB_UDF_CALL_SCALA_PROC: {
SUdfColumn output = {0};
SUdfDataBlock input = {0};
convertDataBlockToUdfDataBlock(&call->block, &input);
udf->scalarProcFunc(&input, &output);
code = udf->scalarProcFunc(&input, &output);
convertUdfColumnToDataBlock(&output, &response.callRsp.resultData);
freeUdfColumn(&output);
@ -244,7 +241,7 @@ void udfdProcessRequest(uv_work_t *req) {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
.bufLen= udf->bufSize,
.numOfResult = 0};
udf->aggProcFunc(&input, &call->interBuf, &outBuf);
code = udf->aggProcFunc(&input, &call->interBuf, &outBuf);
subRsp->resultBuf = outBuf;
break;
@ -253,7 +250,7 @@ void udfdProcessRequest(uv_work_t *req) {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize),
.bufLen= udf->bufSize,
.numOfResult = 0};
udf->aggFinishFunc(&call->interBuf, &outBuf);
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
subRsp->resultBuf = outBuf;
break;
}
@ -261,9 +258,9 @@ void udfdProcessRequest(uv_work_t *req) {
break;
}
rsp->seqNum = request.seqNum;
rsp->type = request.type;
rsp->code = 0;
rsp->seqNum = request->seqNum;
rsp->type = request->type;
rsp->code = code;
subRsp->callType = call->callType;
int32_t len = encodeUdfResponse(NULL, rsp);
@ -274,14 +271,17 @@ void udfdProcessRequest(uv_work_t *req) {
uvUdf->output = uv_buf_init(bufBegin, len);
taosMemoryFree(uvUdf->input.base);
break;
return;
}
case UDF_TASK_TEARDOWN: {
SUdfTeardownRequest *teardown = &request.teardown;
fnInfo("teardown. %" PRId64 "handle:%" PRIx64, request.seqNum, teardown->udfHandle) SUdfcFuncHandle *handle =
(SUdfcFuncHandle *)(teardown->udfHandle);
void udfdProcessTeardownRequest(SUvUdfWork* uvUdf, SUdfRequest* request) {
SUdfTeardownRequest *teardown = &request->teardown;
fnInfo("teardown. %" PRId64 "handle:%" PRIx64, request->seqNum, teardown->udfHandle);
SUdfcFuncHandle *handle = (SUdfcFuncHandle *)(teardown->udfHandle);
SUdf *udf = handle->udf;
bool unloadUdf = false;
int32_t code = TSDB_CODE_SUCCESS;
uv_mutex_lock(&global.udfsMutex);
udf->refCount--;
if (udf->refCount == 0) {
@ -302,9 +302,9 @@ void udfdProcessRequest(uv_work_t *req) {
SUdfResponse response;
SUdfResponse *rsp = &response;
rsp->seqNum = request.seqNum;
rsp->type = request.type;
rsp->code = 0;
rsp->seqNum = request->seqNum;
rsp->type = request->type;
rsp->code = code;
int32_t len = encodeUdfResponse(NULL, rsp);
rsp->msgLen = len;
void *bufBegin = taosMemoryMalloc(len);
@ -313,6 +313,26 @@ void udfdProcessRequest(uv_work_t *req) {
uvUdf->output = uv_buf_init(bufBegin, len);
taosMemoryFree(uvUdf->input.base);
return;
}
void udfdProcessRequest(uv_work_t *req) {
SUvUdfWork *uvUdf = (SUvUdfWork *)(req->data);
SUdfRequest request = {0};
decodeUdfRequest(uvUdf->input.base, &request);
switch (request.type) {
case UDF_TASK_SETUP: {
udfdProcessSetupRequest(uvUdf, &request);
break;
}
case UDF_TASK_CALL: {
udfdProcessCallRequest(uvUdf, &request);
break;
}
case UDF_TASK_TEARDOWN: {
udfdProcessTeardownRequest(uvUdf, &request);
break;
}
default: {

View File

@ -19,31 +19,50 @@ int32_t udf2_destroy() {
int32_t udf2_start(SUdfInterBuf *buf) {
*(int64_t*)(buf->buf) = 0;
buf->bufLen = sizeof(int64_t);
buf->bufLen = sizeof(double);
buf->numOfResult = 0;
return 0;
}
int32_t udf2(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
int64_t sumSquares = *(int64_t*)interBuf->buf;
double sumSquares = *(double*)interBuf->buf;
int8_t numOutput = 0;
for (int32_t i = 0; i < block->numOfCols; ++i) {
SUdfColumn* col = block->udfCols[i];
if (!(col->colMeta.type == TSDB_DATA_TYPE_INT ||
col->colMeta.type == TSDB_DATA_TYPE_DOUBLE)) {
return TSDB_CODE_UDF_INVALID_INPUT;
}
}
for (int32_t i = 0; i < block->numOfCols; ++i) {
for (int32_t j = 0; j < block->numOfRows; ++j) {
SUdfColumn* col = block->udfCols[i];
if (udfColDataIsNull(col, j)) {
continue;
}
switch (col->colMeta.type) {
case TSDB_DATA_TYPE_INT: {
char* cell = udfColDataGetData(col, j);
int32_t num = *(int32_t*)cell;
sumSquares += num * num;
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
char* cell = udfColDataGetData(col, j);
double num = *(double*)cell;
sumSquares += num * num;
break;
}
default:
break;
}
numOutput = 1;
}
}
if (numOutput == 1) {
*(int64_t*)(newInterBuf->buf) = sumSquares;
newInterBuf->bufLen = sizeof(int64_t);
*(double*)(newInterBuf->buf) = sumSquares;
newInterBuf->bufLen = sizeof(double);
}
newInterBuf->numOfResult = numOutput;
return 0;
@ -54,7 +73,7 @@ int32_t udf2_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) {
resultData->numOfResult = 0;
return 0;
}
int64_t sumSquares = *(int64_t*)(buf->buf);
double sumSquares = *(double*)(buf->buf);
*(double*)(resultData->buf) = sqrt(sumSquares);
resultData->bufLen = sizeof(double);
resultData->numOfResult = 1;
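The udf2 changes above switch the intermediate buffer from int64_t to double so DOUBLE inputs (and large squares) accumulate without truncation; conceptually the start/process/finish trio computes sqrt(sum of x^2). A minimal standalone sketch of that pipeline, using a hypothetical buffer struct rather than the real SUdfInterBuf API (link with -lm):
#include <math.h>
#include <stdio.h>
typedef struct { double sumSquares; } Buf;   /* hypothetical intermediate state */
static void   start(Buf *b) { b->sumSquares = 0.0; }
static void   process(Buf *b, const double *vals, int n) {
  for (int i = 0; i < n; ++i) b->sumSquares += vals[i] * vals[i];
}
static double finish(const Buf *b) { return sqrt(b->sumSquares); }
int main(void) {
  Buf b;
  start(&b);
  double block[] = {3.0, 4.0};
  process(&b, block, 2);
  printf("%.1f\n", finish(&b));   /* 5.0 */
  return 0;
}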

View File

@ -1142,9 +1142,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
return code;
}
static int32_t physiStreamScanNodeToJson(const void* pObj, SJson* pJson) { return physiScanNodeToJson(pObj, pJson); }
static int32_t physiStreamScanNodeToJson(const void* pObj, SJson* pJson) { return physiTableScanNodeToJson(pObj, pJson); }
static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) { return jsonToPhysiScanNode(pJson, pObj); }
static int32_t jsonToPhysiStreamScanNode(const SJson* pJson, void* pObj) { return jsonToPhysiTableScanNode(pJson, pObj); }
static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite";

View File

@ -32,6 +32,7 @@ typedef struct SAstCreateContext {
bool notSupport;
SNode* pRootNode;
int16_t placeholderNo;
SArray* pPlaceholderValues;
int32_t errCode;
} SAstCreateContext;

View File

@ -46,6 +46,7 @@ SSchema* getTableTagSchema(const STableMeta* pTableMeta);
int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);

View File

@ -44,6 +44,7 @@ void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
pCxt->notSupport = false;
pCxt->pRootNode = NULL;
pCxt->placeholderNo = 0;
pCxt->pPlaceholderValues = NULL;
pCxt->errCode = TSDB_CODE_SUCCESS;
}
@ -78,7 +79,7 @@ static bool checkUserName(SAstCreateContext* pCxt, SToken* pUserName) {
static bool checkPassword(SAstCreateContext* pCxt, const SToken* pPasswordToken, char* pPassword) {
if (NULL == pPasswordToken) {
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
} else if (pPasswordToken->n >= (TSDB_USET_PASSWORD_LEN - 2)) {
} else if (pPasswordToken->n >= (TSDB_USET_PASSWORD_LEN + 2)) {
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NAME_OR_PASSWD_TOO_LONG);
} else {
strncpy(pPassword, pPasswordToken->z, pPasswordToken->n);
@ -299,6 +300,14 @@ SNode* createPlaceholderValueNode(SAstCreateContext* pCxt, const SToken* pLitera
val->literal = strndup(pLiteral->z, pLiteral->n);
CHECK_OUT_OF_MEM(val->literal);
val->placeholderNo = ++pCxt->placeholderNo;
if (NULL == pCxt->pPlaceholderValues) {
pCxt->pPlaceholderValues = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
if (NULL == pCxt->pPlaceholderValues) {
nodesDestroyNode(val);
return NULL;
}
}
taosArrayPush(pCxt->pPlaceholderValues, &val);
return (SNode*)val;
}

View File

@ -81,6 +81,8 @@ abort_parse:
}
(*pQuery)->pRoot = cxt.pRootNode;
(*pQuery)->placeholderNum = cxt.placeholderNo;
TSWAP((*pQuery)->pPlaceholderValues, cxt.pPlaceholderValues);
}
taosArrayDestroy(cxt.pPlaceholderValues);
return cxt.errCode;
}

View File

@ -53,6 +53,7 @@ typedef struct SInsertParseContext {
SHashObj* pTableBlockHashObj; // global
SHashObj* pSubTableHashObj; // global
SArray* pVgDataBlocks; // global
SHashObj* pTableNameHashObj; // global
int32_t totalNum;
SVnodeModifOpStmt* pOutput;
SStmtCallback* pStmtCb;
@ -252,6 +253,7 @@ static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char *db
} else {
CHECK_CODE(catalogGetTableMeta(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name,
&pCxt->pTableMeta));
ASSERT(pCxt->pTableMeta->tableInfo.rowSize > 0);
SVgroupInfo vg;
CHECK_CODE(
catalogGetTableHashVgroup(pBasicCtx->pCatalog, pBasicCtx->pTransporter, &pBasicCtx->mgmtEpSet, name, &vg));
@ -260,9 +262,13 @@ static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char *db
return TSDB_CODE_SUCCESS;
}
static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char *dbFname) { return getTableMetaImpl(pCxt, name, dbFname, false); }
static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
return getTableMetaImpl(pCxt, name, dbFname, false);
}
static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char *dbFname) { return getTableMetaImpl(pCxt, name, dbFname, true); }
static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
return getTableMetaImpl(pCxt, name, dbFname, true);
}
static int32_t findCol(SToken* pColname, int32_t start, int32_t end, SSchema* pSchema) {
while (start < end) {
@ -1065,6 +1071,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
int32_t tbNum = 0;
char tbFName[TSDB_TABLE_FNAME_LEN];
bool autoCreateTbl = false;
STableMeta *pMeta = NULL;
// for each table
while (1) {
@ -1106,6 +1113,8 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
tNameExtractFullName(&name, tbFName);
CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName)));
// USING clause
if (TK_USING == sToken.type) {
CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
@ -1119,10 +1128,12 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
&dataBuf, NULL, &pCxt->createTblReq));
pMeta = pCxt->pTableMeta;
pCxt->pTableMeta = NULL;
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
@ -1158,7 +1169,7 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(tags, &pCxt->tags, sizeof(pCxt->tags));
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pCxt->pTableMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
(*pCxt->pStmtCb->setInfoFn)(pCxt->pStmtCb->pStmt, pMeta, tags, tbFName, autoCreateTbl, pCxt->pVgroupsHashObj, pCxt->pTableBlockHashObj);
memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pCxt->pVgroupsHashObj = NULL;
@ -1187,7 +1198,8 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
.pSql = (char*)pContext->pSql,
.msg = {.buf = pContext->pMsg, .len = pContext->msgLen},
.pTableMeta = NULL,
.pSubTableHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, false),
.pSubTableHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK),
.pTableNameHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK),
.totalNum = 0,
.pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT),
.pStmtCb = pContext->pStmtCb};
@ -1196,12 +1208,13 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
(*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj,
&context.pTableBlockHashObj);
} else {
context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, false);
context.pTableBlockHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
context.pTableBlockHashObj =
taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
}
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj ||
NULL == context.pOutput) {
NULL == context.pTableNameHashObj || NULL == context.pOutput) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -1214,6 +1227,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
if (NULL == *pQuery) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pQuery)->pTableList = taosArrayInit(taosHashGetSize(context.pTableNameHashObj), sizeof(SName));
if (NULL == (*pQuery)->pTableList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
(*pQuery)->haveResultSet = false;
(*pQuery)->msgType = TDMT_VND_SUBMIT;
@ -1226,6 +1243,13 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery) {
if (TSDB_CODE_SUCCESS == code) {
code = parseInsertBody(&context);
}
if (TSDB_CODE_SUCCESS == code) {
SName* pTable = taosHashIterate(context.pTableNameHashObj, NULL);
while (NULL != pTable) {
taosArrayPush((*pQuery)->pTableList, pTable);
pTable = taosHashIterate(context.pTableNameHashObj, pTable);
}
}
destroyInsertParseContext(&context);
return code;
}
@ -1479,7 +1503,6 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
taosMemoryFree(pSTSchema);
}
#endif
}
if (rowEnd) {
@ -1656,7 +1679,7 @@ static int32_t smlBuildTagRow(SArray* cols, SKVRowBuilder* tagsBuilder, SParsedD
return TSDB_CODE_SUCCESS;
}
int32_t smlBindData(void* handle, SArray* tags, SArray* colsFormat, SArray* colsSchema, SArray* cols, bool format,
int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols, bool format,
STableMeta *pTableMeta, char *tableName, char *msgBuf, int16_t msgBufLen) {
SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};
@ -1677,9 +1700,9 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsFormat, SArray* cols
buildCreateTbReq(&smlHandle->createTblReq, tableName, row, pTableMeta->suid);
STableDataBlocks* pDataBlock = NULL;
ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid), TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), getTableInfo(pTableMeta).rowSize, pTableMeta,
&pDataBlock, NULL, &smlHandle->createTblReq);
ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid),
TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk), getTableInfo(pTableMeta).rowSize,
pTableMeta, &pDataBlock, NULL, &smlHandle->createTblReq);
if (ret != TSDB_CODE_SUCCESS) {
buildInvalidOperationMsg(&pBuf, "create data block error");
return ret;
@ -1699,7 +1722,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsFormat, SArray* cols
initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo);
int32_t rowNum = format ? taosArrayGetSize(colsFormat) : taosArrayGetSize(cols);
int32_t rowNum = taosArrayGetSize(cols);
if(rowNum <= 0) {
return buildInvalidOperationMsg(&pBuf, "cols size <= 0");
}
@ -1711,13 +1734,10 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsFormat, SArray* cols
for (int32_t r = 0; r < rowNum; ++r) {
STSRow* row = (STSRow*)(pDataBlock->pData + pDataBlock->size); // skip the SSubmitBlk header
tdSRowResetBuf(pBuilder, row);
void* rowData = NULL;
void *rowData = taosArrayGetP(cols, r);
size_t rowDataSize = 0;
if(format){
rowData = taosArrayGetP(colsFormat, r);
rowDataSize = taosArrayGetSize(rowData);
} else {
rowData = taosArrayGetP(cols, r);
}
// 1. set the parsed value from sql string

View File

@ -137,7 +137,7 @@ static int32_t createDataBlock(size_t defaultSize, int32_t rowSize, int32_t star
}
memset(dataBuf->pData, 0, sizeof(SSubmitBlk));
dataBuf->pTableMeta = pTableMeta;
dataBuf->pTableMeta = tableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = getTableColumnSchema(dataBuf->pTableMeta);
@ -465,7 +465,7 @@ int32_t mergeTableDataBlocks(SHashObj* pHashObj, uint8_t payloadType, SArray** p
taosMemoryFreeClear(blkKeyInfo.pKeyTuple);
return ret;
}
ASSERT(pOneTableBlock->pTableMeta->tableInfo.rowSize > 0);
// the maximum expanded size in byte when a row-wise data is converted to SDataRow format
int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize +

View File

@ -368,11 +368,15 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
return TSDB_CODE_SUCCESS;
}
static bool isInternalPrimaryKey(const SColumnNode* pCol) {
return PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && 0 == strcmp(pCol->colName, PK_TS_COL_INTERNAL_NAME);
}
static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
bool found = false;
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta;
if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && 0 == strcmp(pCol->colName, PK_TS_COL_INTERNAL_NAME)) {
if (isInternalPrimaryKey(pCol)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, false, pCol);
return true;
}
@ -389,7 +393,9 @@ static bool findAndSetColumn(SColumnNode* pCol, const STableNode* pTable) {
SNode* pNode;
FOREACH(pNode, pProjectList) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->colName, pExpr->aliasName)) {
if (0 == strcmp(pCol->colName, pExpr->aliasName) ||
((QUERY_NODE_COLUMN == nodeType(pExpr) && PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pExpr)->colId) &&
isInternalPrimaryKey(pCol))) {
setColumnInfoByExpr(pTable, pExpr, pCol);
found = true;
break;
@ -433,8 +439,12 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
}
}
if (!found) {
if (isInternalPrimaryKey(pCol)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_INTERNAL_PK);
} else {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, pCol->colName);
}
}
return DEAL_RES_CONTINUE;
}
@ -3655,6 +3665,9 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
destroyCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (pStmt->ignoreExists) {
req.flags |= TD_CREATE_IF_NOT_EXISTS;
}
SNode* pCol;
col_id_t index = 0;
FOREACH(pCol, pStmt->pCols) {
@ -3785,24 +3798,27 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
return code;
}
static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, const char* pDbName,
const char* pTableName, SKVRow row, uint64_t suid, SVgroupInfo* pVgInfo) {
static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, SCreateSubTableClause* pStmt, SKVRow row,
uint64_t suid, SVgroupInfo* pVgInfo) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
SName name = {.type = TSDB_DB_NAME_T, .acctId = acctId};
strcpy(name.dbname, pDbName);
strcpy(name.dbname, pStmt->dbName);
tNameGetFullDbName(&name, dbFName);
struct SVCreateTbReq req = {0};
req.type = TD_CHILD_TABLE;
req.name = strdup(pTableName);
req.name = strdup(pStmt->tableName);
req.ctb.suid = suid;
req.ctb.pTag = row;
if (pStmt->ignoreExists) {
req.flags |= TD_CREATE_IF_NOT_EXISTS;
}
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
if (pTableBatch == NULL) {
SVgroupCreateTableBatch tBatch = {0};
tBatch.info = *pVgInfo;
strcpy(tBatch.dbName, pDbName);
strcpy(tBatch.dbName, pStmt->dbName);
tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
taosArrayPush(tBatch.req.pArray, &req);
@ -3964,8 +3980,7 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt->dbName, pStmt->tableName, row,
pSuperTableMeta->uid, &info);
addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, row, pSuperTableMeta->uid, &info);
}
taosMemoryFreeClear(pSuperTableMeta);

View File

@ -146,6 +146,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Invalid binary/nchar column length";
case TSDB_CODE_PAR_INVALID_TAGS_NUM:
return "Invalid number of tag columns";
case TSDB_CODE_PAR_INVALID_INTERNAL_PK:
return "Invalid _c0 or _rowts expression";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:
@ -226,6 +228,23 @@ STableComInfo getTableInfo(const STableMeta* pTableMeta) {
return pTableMeta->tableInfo;
}
static uint32_t getTableMetaSize(const STableMeta* pTableMeta) {
int32_t totalCols = 0;
if (pTableMeta->tableInfo.numOfColumns >= 0) {
totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
}
return sizeof(STableMeta) + totalCols * sizeof(SSchema);
}
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
size_t size = getTableMetaSize(pTableMeta);
STableMeta* p = taosMemoryMalloc(size);
memcpy(p, pTableMeta, size);
return p;
}
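tableMetaDup above can deep-copy with a single flat memcpy because the column and tag schemas are laid out contiguously right after the STableMeta header. A minimal sketch of that contiguous-copy pattern, with a hypothetical struct rather than the real STableMeta layout:
#include <stdlib.h>
#include <string.h>
typedef struct { int type; int bytes; }   Col;    /* hypothetical column schema */
typedef struct { int nCols; Col cols[]; } Meta;   /* header + flexible array member */
static Meta *metaDup(const Meta *src) {
  size_t size = sizeof(Meta) + (size_t)src->nCols * sizeof(Col);
  Meta  *dst  = malloc(size);
  if (dst != NULL) memcpy(dst, src, size);   /* one copy grabs header and all schemas */
  return dst;
}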
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen) {
if (len <= 0 || dlen <= 0) return 0;

View File

@ -39,16 +39,99 @@ static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
if (TSDB_CODE_SUCCESS == code) {
code = authenticate(pCxt, *pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && 0 == (*pQuery)->placeholderNum) {
code = translate(pCxt, *pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && 0 == (*pQuery)->placeholderNum) {
code = calculateConstant(pCxt, *pQuery);
}
return code;
}
int32_t qParseQuerySql(SParseContext* pCxt, SQuery** pQuery) {
static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
if (pParam->is_null && 1 == *(pParam->is_null)) {
pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
return TSDB_CODE_SUCCESS;
}
int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes);
pVal->node.resType.type = pParam->buffer_type;
pVal->node.resType.bytes = inputSize;
switch (pParam->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
pVal->datum.b = *((bool*)pParam->buffer);
break;
case TSDB_DATA_TYPE_TINYINT:
pVal->datum.i = *((int8_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_SMALLINT:
pVal->datum.i = *((int16_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_INT:
pVal->datum.i = *((int32_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_BIGINT:
pVal->datum.i = *((int64_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_FLOAT:
pVal->datum.d = *((float*)pParam->buffer);
break;
case TSDB_DATA_TYPE_DOUBLE:
pVal->datum.d = *((double*)pParam->buffer);
break;
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
pVal->datum.p = taosMemoryCalloc(1, pVal->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
if (NULL == pVal->datum.p) {
return TSDB_CODE_OUT_OF_MEMORY;
}
varDataSetLen(pVal->datum.p, pVal->node.resType.bytes);
strncpy(varDataVal(pVal->datum.p), (const char*)pParam->buffer, pVal->node.resType.bytes);
break;
case TSDB_DATA_TYPE_NCHAR: {
pVal->node.resType.bytes *= TSDB_NCHAR_SIZE;
pVal->datum.p = taosMemoryCalloc(1, pVal->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
if (NULL == pVal->datum.p) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t output = 0;
if (!taosMbsToUcs4(pParam->buffer, inputSize, (TdUcs4*)varDataVal(pVal->datum.p), pVal->node.resType.bytes,
&output)) {
return errno;
}
varDataSetLen(pVal->datum.p, output);
pVal->node.resType.bytes = output;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
pVal->datum.i = *((int64_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_UTINYINT:
pVal->datum.u = *((uint8_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_USMALLINT:
pVal->datum.u = *((uint16_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_UINT:
pVal->datum.u = *((uint32_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_UBIGINT:
pVal->datum.u = *((uint64_t*)pParam->buffer);
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
case TSDB_DATA_TYPE_MEDIUMBLOB:
// todo
default:
break;
}
pVal->translate = true;
return TSDB_CODE_SUCCESS;
}
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = TSDB_CODE_SUCCESS;
if (isInsertSql(pCxt->pSql, pCxt->sqlLen)) {
code = parseInsertSql(pCxt, pQuery);
@ -77,3 +160,29 @@ void qDestroyQuery(SQuery* pQueryNode) {
int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) {
return extractResultSchema(pRoot, numOfCols, pSchema);
}
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx, uint64_t queryId) {
int32_t code = TSDB_CODE_SUCCESS;
if (colIdx < 0) {
int32_t size = taosArrayGetSize(pQuery->pPlaceholderValues);
for (int32_t i = 0; i < size; ++i) {
code = setValueByBindParam((SValueNode*)taosArrayGetP(pQuery->pPlaceholderValues, i), pParams + i);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
}
} else {
code = setValueByBindParam((SValueNode*)taosArrayGetP(pQuery->pPlaceholderValues, colIdx), pParams);
}
return code;
}
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery) {
int32_t code = translate(pCxt, pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = calculateConstant(pCxt, pQuery);
}
return code;
}

View File

@ -151,20 +151,32 @@ static bool needOptimizeDynamicScan(const SFunctionNode* pFunc) {
static int32_t osdGetRelatedFuncs(SScanLogicNode* pScan, SNodeList** pSdrFuncs, SNodeList** pDsoFuncs) {
SNodeList* pAllFuncs = osdGetAllFuncs(pScan->node.pParent);
SNodeList* pTmpSdrFuncs = NULL;
SNodeList* pTmpDsoFuncs = NULL;
SNode* pFunc = NULL;
bool otherFunc = false;
FOREACH(pFunc, pAllFuncs) {
int32_t code = TSDB_CODE_SUCCESS;
if (needOptimizeDataRequire((SFunctionNode*)pFunc)) {
code = nodesListMakeStrictAppend(pSdrFuncs, nodesCloneNode(pFunc));
code = nodesListMakeStrictAppend(&pTmpSdrFuncs, nodesCloneNode(pFunc));
} else if (needOptimizeDynamicScan((SFunctionNode*)pFunc)) {
code = nodesListMakeStrictAppend(pDsoFuncs, nodesCloneNode(pFunc));
code = nodesListMakeStrictAppend(&pTmpDsoFuncs, nodesCloneNode(pFunc));
} else {
otherFunc = true;
}
if (TSDB_CODE_SUCCESS != code) {
nodesDestroyList(*pSdrFuncs);
nodesDestroyList(*pDsoFuncs);
nodesDestroyList(pTmpSdrFuncs);
nodesDestroyList(pTmpDsoFuncs);
return code;
}
}
if (otherFunc) {
nodesDestroyList(pTmpSdrFuncs);
nodesDestroyList(pTmpDsoFuncs);
} else {
*pSdrFuncs = pTmpSdrFuncs;
*pDsoFuncs = pTmpDsoFuncs;
}
return TSDB_CODE_SUCCESS;
}

View File

@ -460,9 +460,13 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
memcpy(pTableScan->scanSeq, pScanLogicNode->scanSeq, sizeof(pScanLogicNode->scanSeq));
pTableScan->scanRange = pScanLogicNode->scanRange;
pTableScan->ratio = pScanLogicNode->ratio;
if (pScanLogicNode->pVgroupList) {
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
pSubplan->execNodeStat.tableNum = pScanLogicNode->pVgroupList->vgroups[0].numOfTable;
}
if (pCxt->pExecNodeList) {
taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
}
tNameGetFullDbName(&pScanLogicNode->tableName, pSubplan->dbFName);
pTableScan->dataRequired = pScanLogicNode->dataRequired;
pTableScan->pDynamicScanFuncs = nodesCloneList(pScanLogicNode->pDynamicScanFuncs);
@ -505,13 +509,12 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
static int32_t createStreamScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode,
SPhysiNode** pPhyNode) {
SStreamScanPhysiNode* pScan =
(SStreamScanPhysiNode*)makePhysiNode(pCxt, pScanLogicNode->pMeta->tableInfo.precision,
(SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
if (NULL == pScan) {
return TSDB_CODE_OUT_OF_MEMORY;
int32_t res = createTableScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode);
if (res == TSDB_CODE_SUCCESS) {
ENodeType type = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
setNodeType(*pPhyNode, type);
}
return createScanPhysiNodeFinalize(pCxt, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode);
return res;
}
static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode,
@ -786,7 +789,7 @@ static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogic
}
static int32_t createStreamScanPhysiNodeByExchange(SPhysiPlanContext* pCxt, SExchangeLogicNode* pExchangeLogicNode,
SPhysiNode** pPhyNode) {
SStreamScanPhysiNode* pScan = (SStreamScanPhysiNode*)makePhysiNode(
SScanPhysiNode* pScan = (SScanPhysiNode*)makePhysiNode(
pCxt, pExchangeLogicNode->precision, (SLogicNode*)pExchangeLogicNode, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
if (NULL == pScan) {
return TSDB_CODE_OUT_OF_MEMORY;

View File

@ -18,7 +18,7 @@
static char* getUsageErrFormat(int32_t errCode) {
switch (errCode) {
case TSDB_CODE_PLAN_EXPECTED_TS_EQUAL:
return "l.ts = r.ts is expected in join expression";
return "left.ts = right.ts is expected in join expression";
case TSDB_CODE_PLAN_NOT_SUPPORT_CROSS_JOIN:
return "not support cross join";
default:

View File

@ -224,7 +224,7 @@ static int32_t calcConstList(SNodeList* pList) {
}
static bool isEmptyResultCond(SNode** pCond) {
if (QUERY_NODE_VALUE != nodeType(*pCond)) {
if (NULL == *pCond || QUERY_NODE_VALUE != nodeType(*pCond)) {
return false;
}
if (((SValueNode*)*pCond)->datum.b) {

View File

@ -28,6 +28,8 @@ TEST_F(PlanOptimizeTest, optimizeScanData) {
run("SELECT COUNT(c1) FROM t1");
run("SELECT COUNT(CAST(c1 AS BIGINT)) FROM t1");
run("SELECT PERCENTILE(c1, 40), COUNT(*) FROM t1");
}
TEST_F(PlanOptimizeTest, orderByPrimaryKey) {

View File

@ -50,5 +50,5 @@ class PlanStmtTest : public PlannerTestBase {
TEST_F(PlanStmtTest, stmt) {
useDb("root", "test");
run("select * from t1 where c1 = ?");
// run("select * from t1 where c1 = ?");
}

View File

@ -198,7 +198,7 @@ class PlannerTestBaseImpl {
cxt.pMsg = stmtEnv_.msgBuf_.data();
cxt.msgLen = stmtEnv_.msgBuf_.max_size();
DO_WITH_THROW(qParseQuerySql, &cxt, pQuery);
DO_WITH_THROW(qParseSql, &cxt, pQuery);
res_.ast_ = toString((*pQuery)->pRoot);
}

View File

@ -153,11 +153,6 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra
.handle = pInfo->msgInfo.handle,
.persistHandle = persistHandle,
.code = 0};
// if (pInfo->msgType == TDMT_VND_QUERY || pInfo->msgType == TDMT_VND_FETCH ||
// pInfo->msgType == TDMT_VND_QUERY_CONTINUE) {
// rpcMsg.persistHandle = 1;
//}
assert(pInfo->fp != NULL);
rpcSendRequestWithCtx(pTransporter, epSet, &rpcMsg, pTransporterId, rpcCtx);

View File

@ -8,7 +8,7 @@ target_include_directories(
)
target_link_libraries(scalar
PRIVATE os util common nodes function qcom
PRIVATE os util common nodes function qcom vnode
)
if(${BUILD_TEST})

View File

@ -26,6 +26,7 @@ typedef struct SScalarCtx {
int32_t code;
SArray *pBlockList; /* element is SSDataBlock* */
SHashObj *pRes; /* element is SScalarParam */
void *param; // additional parameter (in practice the meta handle) used to acquire values such as tbname/tag values
} SScalarCtx;
@ -49,6 +50,7 @@ typedef struct SScalarCtx {
int32_t doConvertDataType(SValueNode* pValueNode, SScalarParam* out);
SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows);
void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode);
#define GET_PARAM_TYPE(_c) ((_c)->columnData->info.type)
#define GET_PARAM_BYTES(_c) ((_c)->columnData->info.bytes)

View File

@ -3505,19 +3505,6 @@ int32_t fltAddValueNodeToConverList(SFltTreeStat *stat, SValueNode* pNode) {
return TSDB_CODE_SUCCESS;
}
void fltConvertToTsValueNode(SFltTreeStat *stat, SValueNode* valueNode) {
char *timeStr = valueNode->datum.p;
if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, stat->precision, &valueNode->datum.i) !=
TSDB_CODE_SUCCESS) {
valueNode->datum.i = 0;
}
taosMemoryFree(timeStr);
valueNode->typeData = valueNode->datum.i;
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
}
EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
SFltTreeStat *stat = (SFltTreeStat *)pContext;
@ -3566,7 +3553,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
fltConvertToTsValueNode(stat, valueNode);
sclConvertToTsValueNode(stat->precision, valueNode);
return DEAL_RES_CONTINUE;
}
@ -3614,6 +3601,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && node->opType >= OP_TYPE_NOT_EQUAL) {
stat->scalarMode = true;
return DEAL_RES_CONTINUE;
}
if (NULL == node->pRight) {
if (scalarGetOperatorParamNum(node->opType) > 1) {
fltError("invalid operator, pRight:%p, nodeType:%d, opType:%d", node->pRight, nodeType(node), node->opType);
@ -3695,7 +3687,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) {
for (int32_t i = 0; i < nodeNum; ++i) {
SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i);
fltConvertToTsValueNode(pStat, valueNode);
sclConvertToTsValueNode(pStat->precision, valueNode);
}
_return:

View File

@ -8,6 +8,7 @@
#include "tdatablock.h"
#include "scalar.h"
#include "tudf.h"
#include "ttime.h"
int32_t scalarGetOperatorParamNum(EOperatorType type) {
if (OP_TYPE_IS_NULL == type || OP_TYPE_IS_NOT_NULL == type || OP_TYPE_IS_TRUE == type || OP_TYPE_IS_NOT_TRUE == type
@ -19,6 +20,19 @@ int32_t scalarGetOperatorParamNum(EOperatorType type) {
return 2;
}
void sclConvertToTsValueNode(int8_t precision, SValueNode* valueNode) {
char *timeStr = valueNode->datum.p;
if (convertStringToTimestamp(valueNode->node.resType.type, valueNode->datum.p, precision, &valueNode->datum.i) !=
TSDB_CODE_SUCCESS) {
valueNode->datum.i = 0;
}
taosMemoryFree(timeStr);
valueNode->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
valueNode->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
}
SColumnInfoData* createColumnInfoData(SDataType* pType, int32_t numOfRows) {
SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData));
if (pColumnData == NULL) {
@ -251,6 +265,7 @@ int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t
*rowNum = param->numOfRows;
}
param->param = ctx->param;
return TSDB_CODE_SUCCESS;
}
@ -535,7 +550,7 @@ EDealRes sclRewriteBasedOnOptr(SNode** pNode, SScalarCtx *ctx, EOperatorType opT
}
EDealRes sclRewriteOperatorForNullValue(SNode** pNode, SScalarCtx *ctx) {
EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
if (node->pLeft && (QUERY_NODE_VALUE == nodeType(node->pLeft))) {
@ -543,6 +558,11 @@ EDealRes sclRewriteOperatorForNullValue(SNode** pNode, SScalarCtx *ctx) {
if (SCL_IS_NULL_VALUE_NODE(valueNode) && (node->opType != OP_TYPE_IS_NULL && node->opType != OP_TYPE_IS_NOT_NULL)) {
return sclRewriteBasedOnOptr(pNode, ctx, node->opType);
}
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pRight && nodesIsExprNode(node->pRight)
&& ((SExprNode*)node->pRight)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
sclConvertToTsValueNode(((SExprNode*)node->pRight)->resType.precision, valueNode);
}
}
if (node->pRight && (QUERY_NODE_VALUE == nodeType(node->pRight))) {
@ -550,6 +570,11 @@ EDealRes sclRewriteOperatorForNullValue(SNode** pNode, SScalarCtx *ctx) {
if (SCL_IS_NULL_VALUE_NODE(valueNode) && (node->opType != OP_TYPE_IS_NULL && node->opType != OP_TYPE_IS_NOT_NULL)) {
return sclRewriteBasedOnOptr(pNode, ctx, node->opType);
}
if (IS_STR_DATA_TYPE(valueNode->node.resType.type) && node->pLeft && nodesIsExprNode(node->pLeft)
&& ((SExprNode*)node->pLeft)->resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
sclConvertToTsValueNode(((SExprNode*)node->pLeft)->resType.precision, valueNode);
}
}
if (node->pRight && (QUERY_NODE_NODE_LIST == nodeType(node->pRight))) {
@ -672,7 +697,7 @@ EDealRes sclRewriteOperator(SNode** pNode, SScalarCtx *ctx) {
SOperatorNode *node = (SOperatorNode *)*pNode;
if ((!SCL_IS_CONST_NODE(node->pLeft)) || (!SCL_IS_CONST_NODE(node->pRight))) {
return sclRewriteOperatorForNullValue(pNode, ctx);
return sclRewriteNonConstOperator(pNode, ctx);
}
SScalarParam output = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))};
@ -885,7 +910,7 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
}
int32_t code = 0;
SScalarCtx ctx = {.code = 0, .pBlockList = pBlockList};
SScalarCtx ctx = {.code = 0, .pBlockList = pBlockList, .param = pDst->param};
// TODO: OPT performance
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);

View File

@ -1,10 +1,11 @@
#include "function.h"
#include "scalar.h"
#include "tdatablock.h"
#include "ttime.h"
#include "sclInt.h"
#include "sclvector.h"
#include "tdatablock.h"
#include "tjson.h"
#include "ttime.h"
#include "vnode.h"
typedef float (*_float_fn)(float);
typedef double (*_double_fn)(double);
@ -1512,6 +1513,21 @@ int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
ASSERT(inputNum == 1);
colDataAppend(pOutput->columnData, pOutput->numOfRows, colDataGetData(pInput->columnData, 0), false);
SMetaReader mr = {0};
metaReaderInit(&mr, pInput->param, 0);
uint64_t uid = *(uint64_t *)colDataGetData(pInput->columnData, 0);
metaGetTableEntryByUid(&mr, uid);
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(str, mr.me.name);
metaReaderClear(&mr);
for(int32_t i = 0; i < pInput->numOfRows; ++i) {
colDataAppend(pOutput->columnData, pOutput->numOfRows + i, str, false);
}
pOutput->numOfRows += pInput->numOfRows;
return TSDB_CODE_SUCCESS;
}

View File

@ -1092,11 +1092,10 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVCreateTbRsp *rsp = batchRsp.pRsps + i;
if (NEED_CLIENT_HANDLE_ERROR(rsp->code)) {
tDecoderClear(&coder);
SCH_ERR_JRET(rsp->code);
} else if (TSDB_CODE_SUCCESS != rsp->code) {
if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
tDecoderClear(&coder);
SCH_ERR_JRET(code);
}
}
}
@ -1117,11 +1116,10 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVDropTbRsp *rsp = batchRsp.pRsps + i;
if (NEED_CLIENT_HANDLE_ERROR(rsp->code)) {
tDecoderClear(&coder);
SCH_ERR_JRET(rsp->code);
} else if (TSDB_CODE_SUCCESS != rsp->code) {
if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
tDecoderClear(&coder);
SCH_ERR_JRET(code);
}
}
}
@ -1147,6 +1145,17 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
SCH_ERR_JRET(code);
}
if (rsp->nBlocks > 0) {
for (int32_t i = 0; i < rsp->nBlocks; ++i) {
SSubmitBlkRsp *blk = rsp->pBlocks + i;
if (TSDB_CODE_SUCCESS != blk->code) {
code = blk->code;
tFreeSSubmitRsp(rsp);
SCH_ERR_JRET(code);
}
}
}
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows);

View File

@ -154,7 +154,7 @@ int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, in
// sink
if (pTask->sinkType == TASK_SINK__TABLE) {
/*blockDebugShowData(pRes);*/
blockDebugShowData(pRes);
pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pRes);
} else if (pTask->sinkType == TASK_SINK__SMA) {
pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pRes);

View File

@ -271,6 +271,7 @@ int32_t syncNodeSendMsgByInfo(const SNodeInfo* nodeInfo, SSyncNode* pSyncNode, S
cJSON* syncNode2Json(const SSyncNode* pSyncNode);
char* syncNode2Str(const SSyncNode* pSyncNode);
char* syncNode2SimpleStr(const SSyncNode* pSyncNode);
void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg *newConfig);
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);

View File

@ -57,6 +57,11 @@ SyncIndex syncUtilMinIndex(SyncIndex a, SyncIndex b);
SyncIndex syncUtilMaxIndex(SyncIndex a, SyncIndex b);
void syncUtilMsgHtoN(void* msg);
void syncUtilMsgNtoH(void* msg);
bool syncUtilIsData(tmsg_t msgType);
bool syncUtilUserPreCommit(tmsg_t msgType);
bool syncUtilUserCommit(tmsg_t msgType);
bool syncUtilUserRollback(tmsg_t msgType);
#ifdef __cplusplus
}

View File

@ -19,6 +19,7 @@
#include "syncRaftStore.h"
#include "syncUtil.h"
#include "syncVoteMgr.h"
#include "syncRaftCfg.h"
// TLA+ Spec
// HandleAppendEntriesRequest(i, j, m) ==
@ -199,7 +200,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index);
assert(pRollBackEntry != NULL);
if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
//if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
if (syncUtilUserRollback(pRollBackEntry->msgType)) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg);
@ -227,7 +229,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pAppendEntry, &rpcMsg);
if (ths->pFsm != NULL) {
if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pAppendEntry->index;
cbMeta.isWeak = pAppendEntry->isWeak;
@ -258,7 +261,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pAppendEntry, &rpcMsg);
if (ths->pFsm != NULL) {
if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pAppendEntry->index;
cbMeta.isWeak = pAppendEntry->isWeak;
@ -320,7 +324,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (ths->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (ths->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
cbMeta.isWeak = pEntry->isWeak;
@ -330,6 +335,15 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
ths->pFsm->FpCommitCb(ths->pFsm, &rpcMsg, cbMeta);
}
// config change
if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
SSyncCfg newSyncCfg;
int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
ASSERT(ret == 0);
syncNodeUpdateConfig(ths, &newSyncCfg);
}
rpcFreeCont(rpcMsg.pCont);
syncEntryDestory(pEntry);
}

View File

@ -19,6 +19,7 @@
#include "syncRaftLog.h"
#include "syncRaftStore.h"
#include "syncUtil.h"
#include "syncRaftCfg.h"
// \* Leader i advances its commitIndex.
// \* This is done as a separate step from handling AppendEntries responses,
@ -101,7 +102,8 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (pSyncNode->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (pSyncNode->pFsm->FpCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (pSyncNode->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
cbMeta.isWeak = pEntry->isWeak;
@ -111,6 +113,15 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta);
}
// config change
if (pEntry->originalRpcType == TDMT_VND_SYNC_CONFIG_CHANGE) {
SSyncCfg newSyncCfg;
int32_t ret = syncCfgFromStr(rpcMsg.pCont, &newSyncCfg);
ASSERT(ret == 0);
syncNodeUpdateConfig(pSyncNode, &newSyncCfg);
}
rpcFreeCont(rpcMsg.pCont);
syncEntryDestory(pEntry);
}

View File

@ -116,6 +116,15 @@ void syncStop(int64_t rid) {
int32_t syncReconfig(int64_t rid, const SSyncCfg* pSyncCfg) {
int32_t ret = 0;
char *configChange = syncCfg2Str((SSyncCfg*)pSyncCfg);
SRpcMsg rpcMsg = {0};
rpcMsg.msgType = TDMT_VND_SYNC_CONFIG_CHANGE;
rpcMsg.noResp = 1;
rpcMsg.contLen = strlen(configChange) + 1;
rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen);
snprintf(rpcMsg.pCont, rpcMsg.contLen, "%s", configChange);
taosMemoryFree(configChange);
ret = syncPropose(rid, &rpcMsg, false);
return ret;
}
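For orientation, a hedged caller-side sketch of this reconfigure path: the new membership is built into an SSyncCfg and handed to syncReconfig(), which serializes it and proposes it as a TDMT_VND_SYNC_CONFIG_CHANGE entry. The host name, port and rid value below are illustrative placeholders, not taken from this commit; the SSyncCfg/SNodeInfo field names are assumed from the surrounding code.
// hedged usage sketch, not part of this commit
SSyncCfg newCfg = {0};
newCfg.replicaNum = 3;
newCfg.myIndex = 0;
snprintf(newCfg.nodeInfo[0].nodeFqdn, TSDB_FQDN_LEN, "node-a");  // placeholder host
newCfg.nodeInfo[0].nodePort = 7100;                              // placeholder port
// ... fill nodeInfo[1] and nodeInfo[2] for the other members ...
if (syncReconfig(rid, &newCfg) != 0) {
  // the change was rejected before being proposed; nothing was replicated
}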
@ -849,6 +858,35 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) {
return s;
}
void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg *newConfig) {
pSyncNode->pRaftCfg->cfg = *newConfig;
int32_t ret = raftCfgPersist(pSyncNode->pRaftCfg);
ASSERT(ret == 0);
// init internal
pSyncNode->myNodeInfo = pSyncNode->pRaftCfg->cfg.nodeInfo[pSyncNode->pRaftCfg->cfg.myIndex];
syncUtilnodeInfo2raftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId);
// init peersNum, peers, peersId
pSyncNode->peersNum = pSyncNode->pRaftCfg->cfg.replicaNum - 1;
int j = 0;
for (int i = 0; i < pSyncNode->pRaftCfg->cfg.replicaNum; ++i) {
if (i != pSyncNode->pRaftCfg->cfg.myIndex) {
pSyncNode->peersNodeInfo[j] = pSyncNode->pRaftCfg->cfg.nodeInfo[i];
j++;
}
}
for (int i = 0; i < pSyncNode->peersNum; ++i) {
syncUtilnodeInfo2raftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i]);
}
// init replicaNum, replicasId
pSyncNode->replicaNum = pSyncNode->pRaftCfg->cfg.replicaNum;
for (int i = 0; i < pSyncNode->pRaftCfg->cfg.replicaNum; ++i) {
syncUtilnodeInfo2raftId(&pSyncNode->pRaftCfg->cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]);
}
}
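One implicit assumption worth calling out: syncNodeUpdateConfig() trusts the incoming config, so myIndex must stay in range and this node must still be a member of the new cluster. A hedged validation helper (hypothetical, not in this commit; TSDB_MAX_REPLICA is assumed to be the bound of the nodeInfo array) could make that explicit:
// hypothetical guard, not part of this commit
static bool syncCfgIsSane(const SSyncCfg* pCfg) {
  if (pCfg->replicaNum <= 0 || pCfg->replicaNum > TSDB_MAX_REPLICA) return false;  // stays within the array bound
  if (pCfg->myIndex < 0 || pCfg->myIndex >= pCfg->replicaNum) return false;        // this node's own slot is valid
  return true;
}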
SSyncNode* syncNodeAcquire(int64_t rid) {
SSyncNode* pNode = taosAcquireRef(tsNodeRefId, rid);
if (pNode == NULL) {
@ -1207,7 +1245,8 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (ths->pFsm != NULL) {
if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
cbMeta.isWeak = pEntry->isWeak;
@ -1228,7 +1267,8 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) {
syncEntry2OriginalRpc(pEntry, &rpcMsg);
if (ths->pFsm != NULL) {
if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
//if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_VND_SYNC_NOOP) {
if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) {
SFsmCbMeta cbMeta;
cbMeta.index = pEntry->index;
cbMeta.isWeak = pEntry->isWeak;

View File

@ -213,3 +213,31 @@ void syncUtilMsgNtoH(void* msg) {
pHead->contLen = ntohl(pHead->contLen);
pHead->vgId = ntohl(pHead->vgId);
}
bool syncUtilIsData(tmsg_t msgType) {
if (msgType == TDMT_VND_SYNC_NOOP || msgType == TDMT_VND_SYNC_CONFIG_CHANGE) {
return false;
}
return true;
}
bool syncUtilUserPreCommit(tmsg_t msgType) {
if (msgType != TDMT_VND_SYNC_NOOP && msgType != TDMT_VND_SYNC_CONFIG_CHANGE) {
return true;
}
return false;
}
bool syncUtilUserCommit(tmsg_t msgType) {
if (msgType != TDMT_VND_SYNC_NOOP && msgType != TDMT_VND_SYNC_CONFIG_CHANGE) {
return true;
}
return false;
}
bool syncUtilUserRollback(tmsg_t msgType) {
if (msgType != TDMT_VND_SYNC_NOOP && msgType != TDMT_VND_SYNC_CONFIG_CHANGE) {
return true;
}
return false;
}
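All four predicates exclude the two internal entry types, so NOOP and CONFIG_CHANGE entries are replicated and committed like any other entry but never surface through the user FSM callbacks. A quick illustration of the intended semantics; TDMT_VND_SUBMIT simply stands in for any user-level message type:
// illustration only
assert(syncUtilUserCommit(TDMT_VND_SYNC_NOOP) == false);
assert(syncUtilUserCommit(TDMT_VND_SYNC_CONFIG_CHANGE) == false);
assert(syncUtilUserCommit(TDMT_VND_SUBMIT) == true);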

View File

@ -114,6 +114,7 @@ int tdbBtreeOpen(int keyLen, int valLen, SPager *pPager, tdb_cmpr_fn_t kcmpr, SB
int tdbBtreeClose(SBTree *pBt) {
if (pBt) {
tdbFree(pBt->pBuf);
tdbOsFree(pBt);
}
return 0;

View File

@ -242,7 +242,7 @@ static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
int h;
h = tdbPCachePageHash(&(pPage->pgid));
for (ppPage = &(pCache->pgHash[h % pCache->nHash]); *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
;
ASSERT(*ppPage == pPage);
*ppPage = pPage->pHashNext;
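The loop above walks the hash bucket through a pointer-to-pointer so the matching page can be unlinked without tracking a previous node; the added (*ppPage) check just stops at the end of the chain when the page is not present instead of dereferencing a null link. A generic, self-contained sketch of the same idiom (illustrative types, not the tdb structures):
typedef struct Node { struct Node *next; } Node;

static void unlinkNode(Node **head, Node *target) {
  Node **pp = head;
  while (*pp && *pp != target) {   // stop at the end of the chain if target is absent
    pp = &(*pp)->next;
  }
  if (*pp == target) {
    *pp = target->next;            // unlink without a separate prev pointer
  }
}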

View File

@ -54,8 +54,8 @@ int32_t walRegisterRead(SWalReadHandle *pRead, int64_t ver) {
return 0;
}
static int32_t walReadSeekFilePos(SWalReadHandle *pRead, int64_t fileFirstVer, int64_t ver) {
int ret = 0;
static int64_t walReadSeekFilePos(SWalReadHandle *pRead, int64_t fileFirstVer, int64_t ver) {
int64_t ret = 0;
TdFilePtr pIdxTFile = pRead->pReadIdxTFile;
TdFilePtr pLogTFile = pRead->pReadLogTFile;
@ -65,11 +65,18 @@ static int32_t walReadSeekFilePos(SWalReadHandle *pRead, int64_t fileFirstVer, i
ret = taosLSeekFile(pIdxTFile, offset, SEEK_SET);
if (ret < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("failed to seek idx file, ver %ld, pos: %ld, since %s", ver, offset, terrstr());
return -1;
}
SWalIdxEntry entry;
if (taosReadFile(pIdxTFile, &entry, sizeof(SWalIdxEntry)) != sizeof(SWalIdxEntry)) {
SWalIdxEntry entry = {0};
if ((ret = taosReadFile(pIdxTFile, &entry, sizeof(SWalIdxEntry))) != sizeof(SWalIdxEntry)) {
if (ret < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("failed to read idx file, since %s", terrstr());
} else {
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
wError("read idx file incompletely, read bytes %ld, bytes should be %lu", ret, sizeof(SWalIdxEntry));
}
return -1;
}
@ -77,6 +84,7 @@ static int32_t walReadSeekFilePos(SWalReadHandle *pRead, int64_t fileFirstVer, i
ret = taosLSeekFile(pLogTFile, entry.offset, SEEK_SET);
if (ret < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("failed to seek log file, ver %ld, pos: %ld, since %s", ver, entry.offset, terrstr());
return -1;
}
return ret;
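The return type is widened from int32_t to int64_t because taosLSeekFile() reports the resulting file offset; on a WAL segment larger than 2 GB a 32-bit return would truncate to a negative value and be misread as a seek failure. A small illustration (the offset value is made up):
// illustration of the truncation the widening avoids
int64_t off64 = 3LL * 1024 * 1024 * 1024;   // 3 GB offset inside a large WAL file
int32_t off32 = (int32_t)off64;             // wraps to a negative number
// with an int32_t return type this successful seek would look like an error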
@ -92,6 +100,8 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) {
TdFilePtr pLogTFile = taosOpenFile(fnameStr, TD_FILE_READ);
if (pLogTFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
terrno = TSDB_CODE_WAL_INVALID_VER;
wError("cannot open file %s, since %s", fnameStr, terrstr());
return -1;
}
@ -99,6 +109,7 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) {
TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_READ);
if (pIdxTFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("cannot open file %s, since %s", fnameStr, terrstr());
return -1;
}
@ -113,6 +124,7 @@ static int32_t walReadSeekVer(SWalReadHandle *pRead, int64_t ver) {
return 0;
}
if (ver > pWal->vers.lastVer || ver < pWal->vers.firstVer) {
wError("invalid version: % " PRId64 ", first ver %ld, last ver %ld", ver, pWal->vers.firstVer, pWal->vers.lastVer);
terrno = TSDB_CODE_WAL_INVALID_VER;
return -1;
}
@ -125,11 +137,13 @@ static int32_t walReadSeekVer(SWalReadHandle *pRead, int64_t ver) {
SWalFileInfo *pRet = taosArraySearch(pWal->fileInfoSet, &tmpInfo, compareWalFileInfo, TD_LE);
ASSERT(pRet != NULL);
if (pRead->curFileFirstVer != pRet->firstVer) {
// error code is set inside the callee
if (walReadChangeFile(pRead, pRet->firstVer) < 0) {
return -1;
}
}
// error code is set inside the callee
if (walReadSeekFilePos(pRead, pRet->firstVer, ver) < 0) {
return -1;
}
@ -142,7 +156,7 @@ static int32_t walReadSeekVer(SWalReadHandle *pRead, int64_t ver) {
void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity) { pRead->capacity = capacity; }
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
int32_t code;
int64_t code;
// TODO: valid ver
if (ver > pRead->pWal->vers.commitVer) {
@ -154,9 +168,7 @@ int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
if (code < 0) return -1;
}
if (!taosValidFile(pRead->pReadLogTFile)) {
return -1;
}
ASSERT(taosValidFile(pRead->pReadLogTFile) == true);
code = taosReadFile(pRead->pReadLogTFile, pHead, sizeof(SWalHead));
if (code != sizeof(SWalHead)) {
@ -175,7 +187,7 @@ int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead) {
}
int32_t walSkipFetchBody(SWalReadHandle *pRead, const SWalHead *pHead) {
int32_t code;
int64_t code;
ASSERT(pRead->curVersion == pHead->head.version);
@ -202,23 +214,24 @@ int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead) {
return -1;
}
*ppHead = ptr;
pReadHead = &((*ppHead)->head);
pRead->capacity = pReadHead->bodyLen;
}
if (pReadHead->bodyLen != taosReadFile(pRead->pReadLogTFile, pReadHead->body, pReadHead->bodyLen)) {
ASSERT(0);
return -1;
}
if (pReadHead->version != ver) {
wError("unexpected wal log version: %" PRId64 ", read request version:%" PRId64 "", pRead->pHead->head.version,
ver);
wError("wal fetch body error: %" PRId64 ", read request version:%" PRId64 "", pRead->pHead->head.version, ver);
pRead->curVersion = -1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
if (walValidBodyCksum(*ppHead) != 0) {
wError("unexpected wal log version: % " PRId64 ", since body checksum not passed", ver);
wError("wal fetch body error: % " PRId64 ", since body checksum not passed", ver);
pRead->curVersion = -1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
@ -245,20 +258,23 @@ int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **p
}
int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver) {
int code;
int64_t code;
// TODO: check wal life
if (pRead->curVersion != ver) {
if (walReadSeekVer(pRead, ver) < 0) {
wError("unexpected wal log version: % " PRId64 ", since %s", ver, terrstr());
return -1;
}
}
if (!taosValidFile(pRead->pReadLogTFile)) {
return -1;
}
ASSERT(taosValidFile(pRead->pReadLogTFile) == true);
code = taosReadFile(pRead->pReadLogTFile, pRead->pHead, sizeof(SWalHead));
if (code != sizeof(SWalHead)) {
if (code < 0)
terrno = TAOS_SYSTEM_ERROR(errno);
else
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
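After this rework a failed read returns -1 and leaves terrno set, distinguishing a system error from TSDB_CODE_WAL_FILE_CORRUPTED. A hedged caller-side sketch; obtaining the read handle is elided and assumed to come from the surrounding wal module:
// hedged caller sketch
if (walReadWithHandle(pRead, ver) < 0) {
  wError("read wal ver %" PRId64 " failed since %s", ver, terrstr());
  // terrno tells a system error apart from TSDB_CODE_WAL_FILE_CORRUPTED
  return -1;
}
// on success pRead->pHead holds the entry for ver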

View File

@ -19,8 +19,8 @@
#include "tref.h"
#include "walInt.h"
static int walSeekWritePos(SWal* pWal, int64_t ver) {
int code = 0;
static int64_t walSeekWritePos(SWal* pWal, int64_t ver) {
int64_t code = 0;
TdFilePtr pIdxTFile = pWal->pWriteIdxTFile;
TdFilePtr pLogTFile = pWal->pWriteLogTFile;
@ -45,7 +45,7 @@ static int walSeekWritePos(SWal* pWal, int64_t ver) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
return code;
return 0;
}
int walSetWrite(SWal* pWal) {
@ -124,7 +124,7 @@ int walChangeWrite(SWal* pWal, int64_t ver) {
}
int walSeekWriteVer(SWal* pWal, int64_t ver) {
int code;
int64_t code;
if (ver == pWal->vers.lastVer) {
return 0;
}

View File

@ -30,7 +30,7 @@ int32_t walCommit(SWal *pWal, int64_t ver) {
}
int32_t walRollback(SWal *pWal, int64_t ver) {
int code;
int64_t code;
char fnameStr[WAL_FILE_LEN];
if (ver > pWal->vers.lastVer || ver < pWal->vers.commitVer) {
terrno = TSDB_CODE_WAL_INVALID_VER;
@ -225,6 +225,7 @@ int walRoll(SWal *pWal) {
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
// terrno is set inside the callee
code = walRollFileInfo(pWal);
if (code != 0) {
return -1;

View File

@ -310,9 +310,6 @@ int64_t taosCloseFile(TdFilePtr *ppFile) {
}
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) {
if (pFile == NULL) {
return 0;
}
#if FILE_WITH_LOCK
taosThreadRwlockRdlock(&(pFile->rwlock));
#endif
@ -359,7 +356,7 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset)
assert(pFile->fd >= 0); // Please check if you have closed the file.
#ifdef WINDOWS
size_t pos = lseek(pFile->fd, 0, SEEK_CUR);
lseek(pFile->fd, (long)offset, SEEK_SET);
lseek(pFile->fd, offset, SEEK_SET);
int64_t ret = read(pFile->fd, buf, count);
lseek(pFile->fd, pos, SEEK_SET);
#else
@ -372,9 +369,6 @@ int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset)
}
int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
if (pFile == NULL) {
return 0;
}
#if FILE_WITH_LOCK
taosThreadRwlockWrlock(&(pFile->rwlock));
#endif
@ -406,14 +400,11 @@ int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count) {
}
int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence) {
if (pFile == NULL) {
return 0;
}
#if FILE_WITH_LOCK
taosThreadRwlockRdlock(&(pFile->rwlock));
#endif
assert(pFile->fd >= 0); // Please check if you have closed the file.
int64_t ret = lseek(pFile->fd, (long)offset, whence);
int64_t ret = lseek(pFile->fd, offset, whence);
#if FILE_WITH_LOCK
taosThreadRwlockUnlock(&(pFile->rwlock));
#endif
@ -424,9 +415,6 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) {
#ifdef WINDOWS
return 0;
#else
if (pFile == NULL) {
return 0;
}
assert(pFile->fd >= 0); // Please check if you have closed the file.
struct stat fileStat;
@ -451,9 +439,6 @@ int32_t taosLockFile(TdFilePtr pFile) {
#ifdef WINDOWS
return 0;
#else
if (pFile == NULL) {
return 0;
}
assert(pFile->fd >= 0); // Please check if you have closed the file.
return (int32_t)flock(pFile->fd, LOCK_EX | LOCK_NB);
@ -464,9 +449,6 @@ int32_t taosUnLockFile(TdFilePtr pFile) {
#ifdef WINDOWS
return 0;
#else
if (pFile == NULL) {
return 0;
}
assert(pFile->fd >= 0); // Please check if you have closed the file.
return (int32_t)flock(pFile->fd, LOCK_UN | LOCK_NB);
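With the NULL checks removed, passing a NULL TdFilePtr now trips the assert (or crashes) instead of silently returning 0, so the open result has to be validated up front. A hedged sketch of the expected caller pattern; the path is a placeholder:
// hedged caller sketch, placeholder path
char buf[256];
TdFilePtr pFile = taosOpenFile("/tmp/example.dat", TD_FILE_READ);
if (pFile == NULL) {
  return -1;                                  // handle the open failure here, not downstream
}
int64_t nread = taosReadFile(pFile, buf, sizeof(buf));
taosCloseFile(&pFile);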

View File

@ -454,6 +454,15 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied")
//planner
TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error")
//udf
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_PIPE_READ_ERR, "udf pipe read error")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_PIPE_CONNECT_ERR, "udf pipe connect error")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_PIPE_NO_PIPE, "udf no pipe")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_LOAD_UDF_FAILURE, "udf load failure")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_STATE, "udf invalid state")
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_INVALID_INPUT, "udf invalid function input")
//schemaless
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PROTOCOL_TYPE, "Invalid line protocol type")
TAOS_DEFINE_ERROR(TSDB_CODE_SML_INVALID_PRECISION_TYPE, "Invalid timestamp precision type")

View File

@ -150,6 +150,9 @@ class TDSql:
raise Exception(repr(e))
return (self.queryRows, timeout)
def getRows(self):
return self.queryRows
def checkRows(self, expectRows):
if self.queryRows == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))

View File

@ -20,6 +20,11 @@ typedef struct {
bool enclose;
} OperInfo;
typedef struct {
char* funcName;
int32_t paramNum;
} FuncInfo;
typedef enum {
BP_BIND_TAG = 1,
BP_BIND_COL,
@ -44,6 +49,13 @@ OperInfo operInfo[] = {
int32_t operatorList[] = {0, 1, 2, 3, 4, 5, 6, 7};
int32_t varoperatorList[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
FuncInfo funcInfo[] = {
{"count", 1},
{"sum", 1},
{"min", 1},
{"sin", 1},
};
char *bpStbPrefix = "st";
char *bpTbPrefix = "t";
int32_t bpDefaultStbId = 1;
@ -154,7 +166,7 @@ CaseCfg gCase[] = {
{"insert:AUTO1-FULL", tListLen(fullColList), fullColList, TTYPE_INSERT, true, true, insertAUTOTest1, 10, 10, 2, 0, 0, 0, 1, -1},
{"query:SUBT-COLUMN", tListLen(fullColList), fullColList, TTYPE_QUERY, false, false, queryColumnTest, 10, 10, 1, 3, 0, 0, 1, 2},
{"query:SUBT-MISC", tListLen(fullColList), fullColList, TTYPE_QUERY, false, false, queryMiscTest, 10, 10, 1, 3, 0, 0, 1, 2},
{"query:SUBT-MISC", tListLen(fullColList), fullColList, TTYPE_QUERY, false, false, queryMiscTest, 2, 10, 1, 3, 0, 0, 1, 2},
};
@ -179,6 +191,8 @@ typedef struct {
int32_t* bindTagTypeList;
int32_t optrIdxListNum;
int32_t* optrIdxList;
int32_t funcIdxListNum;
int32_t* funcIdxList;
int32_t runTimes;
int32_t caseIdx; // static case idx
int32_t caseNum; // num in static case list
@ -186,7 +200,7 @@ typedef struct {
int32_t caseRunNum; // total run case num
} CaseCtrl;
#if 1
#if 0
CaseCtrl gCaseCtrl = { // default
.bindNullNum = 0,
.printCreateTblSql = false,
@ -203,6 +217,8 @@ CaseCtrl gCaseCtrl = { // default
.bindTagTypeList = NULL,
.optrIdxListNum = 0,
.optrIdxList = NULL,
.funcIdxListNum = 0,
.funcIdxList = NULL,
.checkParamNum = false,
.printRes = false,
.runTimes = 0,
@ -241,7 +257,7 @@ CaseCtrl gCaseCtrl = {
};
#endif
#if 0
#if 1
CaseCtrl gCaseCtrl = { // query case with specified col&oper
.bindNullNum = 0,
.printCreateTblSql = false,
@ -255,14 +271,14 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
.optrIdxListNum = 0,
.optrIdxList = NULL,
.checkParamNum = false,
.printRes = false,
.printRes = true,
.runTimes = 0,
.caseRunIdx = -1,
.optrIdxListNum = 0,
.optrIdxList = NULL,
.bindColTypeNum = 0,
.bindColTypeList = NULL,
.caseIdx = 23,
.caseIdx = 24,
.caseNum = 1,
.caseRunNum = 1,
};
@ -513,11 +529,83 @@ void bpAppendOperatorParam(BindData *data, int32_t *len, int32_t dataType, int32
}
break;
default:
printf("!!!invalid paramNum:%d\n", pInfo->paramNum);
printf("!!!invalid operator paramNum:%d\n", pInfo->paramNum);
exit(1);
}
}
void bpAppendFunctionParam(BindData *data, int32_t *len, int32_t dataType, int32_t idx) {
FuncInfo *pInfo = NULL;
if (gCaseCtrl.funcIdxListNum > 0) {
pInfo = &funcInfo[gCaseCtrl.funcIdxList[idx]];
} else {
pInfo = &funcInfo[rand() % tListLen(funcInfo)];
}
switch (pInfo->paramNum) {
case 1:
*len += sprintf(data->sql + *len, " %s(?)", pInfo->funcName);
break;
default:
printf("!!!invalid function paramNum:%d\n", pInfo->paramNum);
exit(1);
}
}
int32_t bpAppendColumnName(BindData *data, int32_t type, int32_t len) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
return sprintf(data->sql + len, "booldata");
break;
case TSDB_DATA_TYPE_TINYINT:
return sprintf(data->sql + len, "tinydata");
break;
case TSDB_DATA_TYPE_SMALLINT:
return sprintf(data->sql + len, "smalldata");
break;
case TSDB_DATA_TYPE_INT:
return sprintf(data->sql + len, "intdata");
break;
case TSDB_DATA_TYPE_BIGINT:
return sprintf(data->sql + len, "bigdata");
break;
case TSDB_DATA_TYPE_FLOAT:
return sprintf(data->sql + len, "floatdata");
break;
case TSDB_DATA_TYPE_DOUBLE:
return sprintf(data->sql + len, "doubledata");
break;
case TSDB_DATA_TYPE_VARCHAR:
return sprintf(data->sql + len, "binarydata");
break;
case TSDB_DATA_TYPE_TIMESTAMP:
return sprintf(data->sql + len, "ts");
break;
case TSDB_DATA_TYPE_NCHAR:
return sprintf(data->sql + len, "nchardata");
break;
case TSDB_DATA_TYPE_UTINYINT:
return sprintf(data->sql + len, "utinydata");
break;
case TSDB_DATA_TYPE_USMALLINT:
return sprintf(data->sql + len, "usmalldata");
break;
case TSDB_DATA_TYPE_UINT:
return sprintf(data->sql + len, "uintdata");
break;
case TSDB_DATA_TYPE_UBIGINT:
return sprintf(data->sql + len, "ubigdata");
break;
default:
printf("!!!invalid col type:%d", type);
exit(1);
}
return 0;
}
void generateQueryCondSQL(BindData *data, int32_t tblIdx) {
int32_t len = sprintf(data->sql, "select * from %s%d where ", bpTbPrefix, tblIdx);
if (!gCurCase->fullCol) {
@ -525,53 +613,7 @@ void generateQueryCondSQL(BindData *data, int32_t tblIdx) {
if (c) {
len += sprintf(data->sql + len, " and ");
}
switch (data->pBind[c].buffer_type) {
case TSDB_DATA_TYPE_BOOL:
len += sprintf(data->sql + len, "booldata");
break;
case TSDB_DATA_TYPE_TINYINT:
len += sprintf(data->sql + len, "tinydata");
break;
case TSDB_DATA_TYPE_SMALLINT:
len += sprintf(data->sql + len, "smalldata");
break;
case TSDB_DATA_TYPE_INT:
len += sprintf(data->sql + len, "intdata");
break;
case TSDB_DATA_TYPE_BIGINT:
len += sprintf(data->sql + len, "bigdata");
break;
case TSDB_DATA_TYPE_FLOAT:
len += sprintf(data->sql + len, "floatdata");
break;
case TSDB_DATA_TYPE_DOUBLE:
len += sprintf(data->sql + len, "doubledata");
break;
case TSDB_DATA_TYPE_VARCHAR:
len += sprintf(data->sql + len, "binarydata");
break;
case TSDB_DATA_TYPE_TIMESTAMP:
len += sprintf(data->sql + len, "ts");
break;
case TSDB_DATA_TYPE_NCHAR:
len += sprintf(data->sql + len, "nchardata");
break;
case TSDB_DATA_TYPE_UTINYINT:
len += sprintf(data->sql + len, "utinydata");
break;
case TSDB_DATA_TYPE_USMALLINT:
len += sprintf(data->sql + len, "usmalldata");
break;
case TSDB_DATA_TYPE_UINT:
len += sprintf(data->sql + len, "uintdata");
break;
case TSDB_DATA_TYPE_UBIGINT:
len += sprintf(data->sql + len, "ubigdata");
break;
default:
printf("!!!invalid col type:%d", data->pBind[c].buffer_type);
exit(1);
}
len += bpAppendColumnName(data, data->pBind[c].buffer_type, len);
bpAppendOperatorParam(data, &len, data->pBind[c].buffer_type, c);
}
@ -582,64 +624,50 @@ void generateQueryCondSQL(BindData *data, int32_t tblIdx) {
}
}
void bpGenerateConstInOpSQL(BindData *data, int32_t tblIdx) {
int32_t len = 0;
len = sprintf(data->sql, "select ");
void generateQueryMiscSQL(BindData *data, int32_t tblIdx) {
int32_t len = sprintf(data->sql, "select * from %s%d where ", bpTbPrefix, tblIdx);
if (!gCurCase->fullCol) {
for (int c = 0; c < gCurCase->bindColNum; ++c) {
if (c) {
len += sprintf(data->sql + len, " and ");
}
switch (data->pBind[c].buffer_type) {
case TSDB_DATA_TYPE_BOOL:
len += sprintf(data->sql + len, "booldata");
break;
case TSDB_DATA_TYPE_TINYINT:
len += sprintf(data->sql + len, "tinydata");
break;
case TSDB_DATA_TYPE_SMALLINT:
len += sprintf(data->sql + len, "smalldata");
break;
case TSDB_DATA_TYPE_INT:
len += sprintf(data->sql + len, "intdata");
break;
case TSDB_DATA_TYPE_BIGINT:
len += sprintf(data->sql + len, "bigdata");
break;
case TSDB_DATA_TYPE_FLOAT:
len += sprintf(data->sql + len, "floatdata");
break;
case TSDB_DATA_TYPE_DOUBLE:
len += sprintf(data->sql + len, "doubledata");
break;
case TSDB_DATA_TYPE_VARCHAR:
len += sprintf(data->sql + len, "binarydata");
break;
case TSDB_DATA_TYPE_TIMESTAMP:
len += sprintf(data->sql + len, "ts");
break;
case TSDB_DATA_TYPE_NCHAR:
len += sprintf(data->sql + len, "nchardata");
break;
case TSDB_DATA_TYPE_UTINYINT:
len += sprintf(data->sql + len, "utinydata");
break;
case TSDB_DATA_TYPE_USMALLINT:
len += sprintf(data->sql + len, "usmalldata");
break;
case TSDB_DATA_TYPE_UINT:
len += sprintf(data->sql + len, "uintdata");
break;
case TSDB_DATA_TYPE_UBIGINT:
len += sprintf(data->sql + len, "ubigdata");
break;
default:
printf("!!!invalid col type:%d", data->pBind[c].buffer_type);
exit(1);
len += sprintf(data->sql + len, ", ");
}
len += bpAppendColumnName(data, data->pBind[c].buffer_type, len);
bpAppendOperatorParam(data, &len, data->pBind[c].buffer_type, c);
}
len += sprintf(data->sql + len, " from %s%d", bpTbPrefix, tblIdx);
}
void bpGenerateConstInFuncSQL(BindData *data, int32_t tblIdx) {
int32_t len = 0;
len = sprintf(data->sql, "select ");
for (int c = 0; c < gCurCase->bindColNum; ++c) {
if (c) {
len += sprintf(data->sql + len, ", ");
}
bpAppendFunctionParam(data, &len, data->pBind[c].buffer_type, c);
}
len += sprintf(data->sql + len, " from %s%d", bpTbPrefix, tblIdx);
}
void generateQueryMiscSQL(BindData *data, int32_t tblIdx) {
switch(tblIdx) {
case 0:
bpGenerateConstInOpSQL(data, tblIdx);
break;
case 1:
//TODO FILL TEST
default:
bpGenerateConstInFuncSQL(data, tblIdx);
break;
}
if (gCaseCtrl.printStmtSql) {

View File

@ -3,7 +3,7 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c startUdfd -v 1
system sh/cfg.sh -n dnode1 -c udf -v 1
print ========= start dnode1 as LEADER
system sh/exec.sh -n dnode1 -s start

View File

@ -159,6 +159,7 @@ sql alter table db.stb rename tag t1 tx
print ========== alter common
sql alter table db.stb comment 'abcde' ;
sql alter table db.stb ttl 10 ;
sql show db.stables;
if $data[0][6] != abcde then

View File

@ -0,0 +1,325 @@
import taos
import sys
import inspect
import traceback
from util.log import *
from util.sql import *
from util.cases import *
PRIVILEGES_ALL = "ALL"
PRIVILEGES_READ = "READ"
PRIVILEGES_WRITE = "WRITE"
class TDconnect:
def __init__(self,
host = None,
port = None,
user = None,
password = None,
database = None,
config = None,
) -> None:
self._conn = None
self._host = host
self._user = user
self._password = password
self._database = database
self._port = port
self._config = config
def __enter__(self):
self._conn = taos.connect(
host =self._host,
port =self._port,
user =self._user,
password=self._password,
database=self._database,
config =self._config
)
self.cursor = self._conn.cursor()
return self
def error(self, sql):
expectErrNotOccured = True
try:
self.cursor.execute(sql)
except BaseException:
expectErrNotOccured = False
if expectErrNotOccured:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" )
else:
self.queryRows = 0
self.queryCols = 0
self.queryResult = None
tdLog.info(f"sql:{sql}, expect error occured")
def query(self, sql, row_tag=None):
# sourcery skip: raise-from-previous-error, raise-specific-error
self.sql = sql
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
traceback.print_exc()
raise Exception(repr(e))
if row_tag:
return self.queryResult
return self.queryRows
def __exit__(self, types, values, trace):
if self._conn:
self.cursor.close()
self._conn.close()
def taos_connect(
host = "127.0.0.1",
port = 6030,
user = "root",
passwd = "taosdata",
database= None,
config = None
):
return TDconnect(
host = host,
port=port,
user=user,
password=passwd,
database=database,
config=config
)
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
@property
def __user_list(self):
return [f"user_test{i}" for i in range(self.users_count) ]
@property
def __passwd_list(self):
return [f"taosdata{i}" for i in range(self.users_count) ]
@property
def __privilege(self):
return [ PRIVILEGES_ALL, PRIVILEGES_READ, PRIVILEGES_WRITE ]
def __priv_level(self, dbname=None):
return f"{dbname}.*" if dbname else "*.*"
def create_user_current(self):
users = self.__user_list
passwds = self.__passwd_list
for i in range(self.users_count):
tdSql.execute(f"create user {users[i]} pass '{passwds[i]}' ")
tdSql.query("show users")
tdSql.checkRows(self.users_count + 1)
def create_user_err(self):
sqls = [
"create users u1 pass 'u1passwd' ",
"create user '' pass 'u1passwd' ",
"create user pass 'u1passwd' ",
"create user u1 pass u1passwd ",
"create user u1 password 'u1passwd' ",
"create user u1 pass u1passwd ",
"create user u1 pass '' ",
"create user u1 pass ' ' ",
"create user u1 pass ",
"create user u1 u2 pass 'u1passwd' 'u2passwd' ",
"create user u1 u2 pass 'u1passwd', 'u2passwd' ",
"create user u1, u2 pass 'u1passwd', 'u2passwd' ",
"create user u1, u2 pass 'u1passwd' 'u2passwd' ",
# length of user_name must be <= 23
"create user u12345678901234567890123 pass 'u1passwd' " ,
# length of passwd must be <= 128
"create user u1 pass 'u12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678' " ,
# password must not contain " ' ~ ` \
"create user u1 pass 'u1passwd\\' " ,
"create user u1 pass 'u1passwd~' " ,
"create user u1 pass 'u1passwd\"' " ,
"create user u1 pass 'u1passwd\'' " ,
"create user u1 pass 'u1passwd`' " ,
# must run after a user named u1 has been created
"create user u1 pass 'u1passwd' " ,
]
tdSql.execute("create user u1 pass 'u1passwd' ")
for sql in sqls:
tdSql.error(sql)
def __alter_pass_sql(self, user, passwd):
return f'''ALTER USER {user} PASS '{passwd}' '''
def alter_pass_current(self):
self.__init_pass = True
for count, i in enumerate(range(self.users_count)):
if self.__init_pass:
tdSql.query(self.__alter_pass_sql(self.__user_list[i], f"new{self.__passwd_list[i]}"))
self.__init_pass = count != self.users_count - 1
else:
tdSql.query(self.__alter_pass_sql(self.__user_list[i], self.__passwd_list[i] ) )
self.__init_pass = count == self.users_count - 1
def alter_pass_err(self): # sourcery skip: remove-redundant-fstring
sqls = [
f"alter users {self.__user_list[0]} pass 'newpass' " ,
f"alter user {self.__user_list[0]} pass '' " ,
f"alter user {self.__user_list[0]} pass ' ' " ,
f"alter user anyuser pass 'newpass' " ,
f"alter user {self.__user_list[0]} pass " ,
f"alter user {self.__user_list[0]} password 'newpass' " ,
]
for sql in sqls:
tdSql.error(sql)
def grant_user_privileges(self, privilege, dbname=None, user_name="root"):
return f"GRANT {privilege} ON {self.__priv_level(dbname)} TO {user_name} "
def test_user_create(self):
self.create_user_current()
self.create_user_err()
def test_alter_pass(self):
self.alter_pass_current()
self.alter_pass_err()
def user_login(self, user, passwd):
login_except = False
try:
with taos_connect(user=user, passwd=passwd) as conn:
cursor = conn.cursor
except BaseException:
login_except = True
cursor = None
return login_except, cursor
def login_currrent(self, user, passwd):
login_except, _ = self.user_login(user, passwd)
if login_except:
tdLog.exit(f"connect failed, user: {user} and pass: {passwd} do not match!")
else:
tdLog.info("connect successfully, user and pass matched!")
def login_err(self, user, passwd):
login_except, _ = self.user_login(user, passwd)
if login_except:
tdLog.info("connect failed, except error occured!")
else:
tdLog.exit("connect successfully, except error not occrued!")
def __drop_user(self, user):
return f"DROP USER {user}"
def drop_user_current(self):
for user in self.__user_list:
tdSql.query(self.__drop_user(user))
def drop_user_error(self):
sqls = [
f"DROP {self.__user_list[0]}",
f"DROP user {self.__user_list[0]} {self.__user_list[1]}",
f"DROP user {self.__user_list[0]} , {self.__user_list[1]}",
f"DROP users {self.__user_list[0]} {self.__user_list[1]}",
f"DROP users {self.__user_list[0]} , {self.__user_list[1]}",
"DROP user root",
"DROP user abcde",
"DROP user ALL",
]
for sql in sqls:
tdSql.error(sql)
def test_drop_user(self):
# must run the drop-error cases first
self.drop_user_error()
self.drop_user_current()
def run(self):
# by default, only the root user exists
tdLog.printNoPrefix("==========step0: init, user list only has root account")
tdSql.query("show users")
tdSql.checkData(0, 0, "root")
tdSql.checkData(0, 1, "super")
# root user privileges
# create user test
tdLog.printNoPrefix("==========step1: create user test")
self.users_count = 5
self.test_user_create()
# show users
tdLog.printNoPrefix("==========step2: show user test")
tdSql.query("show users")
tdSql.checkRows(self.users_count + 2)
# password login authentication
self.login_currrent(self.__user_list[0], self.__passwd_list[0])
self.login_err(self.__user_list[0], f"new{self.__passwd_list[0]}")
# change password
tdLog.printNoPrefix("==========step3: alter user pass test")
self.test_alter_pass()
# login authentication after the password change
tdLog.printNoPrefix("==========step4: check login test")
self.login_err(self.__user_list[0], self.__passwd_list[0])
self.login_currrent(self.__user_list[0], f"new{self.__passwd_list[0]}")
# normal user privileges
# password login
_, user = self.user_login(self.__user_list[0], f"new{self.__passwd_list[0]}")
with taos_connect(user=self.__user_list[0], passwd=f"new{self.__passwd_list[0]}") as conn:
user = conn
# cannot create users
tdLog.printNoPrefix("==========step5: normal user can not create user")
user.error("create use utest1 pass 'utest1pass'")
# can show users
tdLog.printNoPrefix("==========step6: normal user can show user")
user.query("show users")
assert user.queryRows == self.users_count + 2
# cannot alter other users' passwords
tdLog.printNoPrefix("==========step7: normal user can not alter other user pass")
user.error(self.__alter_pass_sql(self.__user_list[1], self.__passwd_list[1] ))
user.error("root", "taosdata_root")
# can alter its own password
tdLog.printNoPrefix("==========step8: normal user can alter owner pass")
user.query(self.__alter_pass_sql(self.__user_list[0], self.__passwd_list[0]))
# cannot drop any user, including itself
tdLog.printNoPrefix("==========step9: normal user can not drop any user ")
user.error(f"drop user {self.__user_list[0]}")
user.error(f"drop user {self.__user_list[1]}")
user.error("drop user root")
# root drops normal users test
tdLog.printNoPrefix("==========step10: super user drop normal user")
self.test_drop_user()
tdSql.query("show users")
tdSql.checkRows(1)
tdSql.checkData(0, 0, "root")
tdSql.checkData(0, 1, "super")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -346,8 +346,10 @@ class TDTestCase:
return
def test_case3(self):
self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 8, 1*10000)
# self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000)
# self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000)
self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*1000)
# self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000)
# self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000)

View File

@ -29,8 +29,8 @@
"batch_create_tbl_num": 50000,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 0,
"interlace_rows": 0,
"insert_rows": 10,
"interlace_rows": 100000,
"insert_interval": 0,
"max_sql_len": 10000000,
"disorder_ratio": 0,

View File

@ -45,16 +45,16 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:query timestamp type")
# tdSql.query("select * from t1 where ts between now()-1m and now()+10m")
# tdSql.checkRows(10)
# tdSql.query("select * from t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
tdSql.query("select * from t1 where ts between now()-1m and now()+10m")
tdSql.checkRows(10)
tdSql.query("select * from t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
# tdSql.checkRows(11)
# tdSql.query("select * from t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
tdSql.query("select * from t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
# tdSql.checkRows(0)
# tdSql.query("select * from t1 where ts between -2793600 and 31507199")
# tdSql.checkRows(0)
# tdSql.query("select * from t1 where ts between 1609430400000 and 4765104000000")
# tdSql.checkRows(11)
tdSql.query("select * from t1 where ts between -2793600 and 31507199")
tdSql.checkRows(0)
tdSql.query("select * from t1 where ts between 1609430400000 and 4765104000000")
tdSql.checkRows(11)
tdLog.printNoPrefix("==========step4:query int type")
@ -68,11 +68,11 @@ class TDTestCase:
tdSql.checkRows(0)
# tdSql.query("select * from t1 where c1 between 0x64 and 0x69")
# tdSql.checkRows(6)
# tdSql.query("select * from t1 where c1 not between 100 and 106")
# tdSql.checkRows(11)
tdSql.query("select * from t1 where c1 not between 100 and 106")
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c1 between {2**31-2} and {2**31+1}")
tdSql.checkRows(1)
tdSql.error(f"select * from t2 where c1 between null and {1-2**31}")
tdSql.query(f"select * from t2 where c1 between null and {1-2**31}")
# tdSql.checkRows(3)
tdSql.query(f"select * from t2 where c1 between {-2**31} and {1-2**31}")
tdSql.checkRows(1)
@ -88,12 +88,12 @@ class TDTestCase:
tdSql.query("select * from t1 where c2 between 'DC3' and 'SYN'")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c2 not between 0.1 and 0.2")
# tdSql.checkRows(11)
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
# tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
# tdSql.checkRows(2)
tdSql.error(f"select * from t2 where c2 between null and {-3.4*10**38}")
tdSql.query(f"select * from t2 where c2 between null and {-3.4*10**38}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step6:query bigint type")
@ -101,7 +101,7 @@ class TDTestCase:
tdSql.query(f"select * from t1 where c3 between {2**31} and {2**31+10}")
tdSql.checkRows(10)
tdSql.query(f"select * from t1 where c3 between {-2**63} and {2**63}")
# tdSql.checkRows(11)
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c3 between {2**31+10} and {2**31}")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c3 between 'a' and 'z'")
@ -112,7 +112,7 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c3 between {-2**63} and {1-2**63}")
# tdSql.checkRows(3)
tdSql.error(f"select * from t2 where c3 between null and {1-2**63}")
tdSql.query(f"select * from t2 where c3 between null and {1-2**63}")
# tdSql.checkRows(2)
tdLog.printNoPrefix("==========step7:query double type")
@ -129,10 +129,10 @@ class TDTestCase:
tdSql.query("select * from t1 where c4 not between 1 and 2")
# tdSql.checkRows(0)
tdSql.query(f"select * from t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
# tdSql.checkRows(1)
tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
# tdSql.checkRows(3)
tdSql.error(f"select * from t2 where c4 between null and {-1.7*10**308}")
tdSql.query(f"select * from t2 where c4 between null and {-1.7*10**308}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step8:query smallint type")
@ -151,7 +151,7 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.query("select * from t2 where c5 between -32768 and -32767")
tdSql.checkRows(1)
tdSql.error("select * from t2 where c5 between null and -32767")
tdSql.query("select * from t2 where c5 between null and -32767")
# tdSql.checkRows(1)
tdLog.printNoPrefix("==========step9:query tinyint type")
@ -170,21 +170,21 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.query("select * from t2 where c6 between -128 and -127")
tdSql.checkRows(1)
tdSql.error("select * from t2 where c6 between null and -127")
tdSql.query("select * from t2 where c6 between null and -127")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step10:invalid query type")
# tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
# tdSql.checkRows(23)
# # non-zero values are all parsed as 1, so "between <negative> and 0" is parsed as "between 1 and 0"
# tdSql.query("select * from supt where isused between 0 and 1")
# tdSql.checkRows(23)
# tdSql.query("select * from supt where isused between -1 and 0")
# tdSql.checkRows(0)
# tdSql.error("select * from supt where isused between false and true")
# tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
# tdSql.checkRows(23)
tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
tdSql.checkRows(23)
# non-zero values are all parsed as 1, so "between <negative> and 0" is parsed as "between 1 and 0"
tdSql.query("select * from supt where isused between 0 and 1")
tdSql.checkRows(23)
tdSql.query("select * from supt where isused between -1 and 0")
tdSql.checkRows(0)
tdSql.error("select * from supt where isused between false and true")
tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
tdSql.checkRows(23)
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")

View File

@ -93,104 +93,124 @@ class TDTestCase:
res = tdSql.query(query_sql.replace('*', 'last(*)'), True)
return int(res[0][-4])
def queryTsCol(self, tb_name):
def queryTsCol(self, tb_name, check_elm=None):
select_elm = "*" if check_elm is None else check_elm
# ts and ts
query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"'
query_sql = f'select {select_elm} from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"'
tdSql.query(query_sql)
tdSql.checkRows(11)
tdSql.checkEqual(self.queryLastC10(query_sql), 11)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and ts <= "2021-01-13 12:00:00"'
query_sql = f'select {select_elm} from {tb_name} where ts >= "2021-01-11 12:00:00" and ts <= "2021-01-13 12:00:00"'
tdSql.query(query_sql)
# tdSql.checkRows(2)
# tdSql.checkEqual(self.queryLastC10(query_sql), 6)
tdSql.checkRows(2)
tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False
## ts or and tinyint col
query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or c1 = 2'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts > "2021-01-11 12:00:00" or c1 = 2'
tdSql.query(query_sql)
tdSql.checkRows(7)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 != 2'
query_sql = f'select {select_elm} from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 != 2'
tdSql.query(query_sql)
tdSql.checkRows(4)
tdSql.checkEqual(self.queryLastC10(query_sql), 5)
tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False
## ts or and smallint col
query_sql = f'select * from {tb_name} where ts <> "2021-01-11 12:00:00" or c2 = 10'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts <> "2021-01-11 12:00:00" or c2 = 10'
tdSql.query(query_sql)
tdSql.checkRows(10)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 <= 1'
query_sql = f'select {select_elm} from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 <= 1'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 1)
tdSql.checkEqual(self.queryLastC10(query_sql), 1) if select_elm == "*" else False
## ts or and int col
query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" or c3 = 4'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts >= "2021-01-11 12:00:00" or c3 = 4'
tdSql.query(query_sql)
tdSql.checkRows(8)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c3 = 4'
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" and c3 = 4'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 4)
tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False
## ts or and big col
query_sql = f'select * from {tb_name} where ts is Null or c4 = 5'
tdSql.error(query_sql)
query_sql = f'select * from {tb_name} where ts is not Null and c4 = 2'
query_sql = f'select {select_elm} from {tb_name} where ts is Null or c4 = 5'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 3)
tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False
query_sql = f'select {select_elm} from {tb_name} where ts is not Null and c4 = 2'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 3) if select_elm == "*" else False
## ts or and float col
query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c5 = 6.6'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c5 = 6.6'
tdSql.query(query_sql)
tdSql.checkRows(5)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c5 = 1.1'
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" and c5 = 1.1'
tdSql.query(query_sql)
tdSql.checkRows(4)
tdSql.checkEqual(self.queryLastC10(query_sql), 4)
tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False
## ts or and double col
query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c6 = 7.7'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c6 = 7.7'
tdSql.query(query_sql)
tdSql.checkRows(5)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c6 = 1.1'
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" and c6 = 1.1'
tdSql.query(query_sql)
tdSql.checkRows(4)
tdSql.checkEqual(self.queryLastC10(query_sql), 4)
tdSql.checkEqual(self.queryLastC10(query_sql), 4) if select_elm == "*" else False
## ts or and binary col
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c7 like "binary_"'
tdSql.error(query_sql)
query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 in ("binary")'
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" or c7 like "binary_"'
tdSql.query(query_sql)
tdSql.checkRows(5)
tdSql.checkEqual(self.queryLastC10(query_sql), 5)
tdSql.checkEqual(self.queryLastC10(query_sql), 8) if select_elm == "*" else False
query_sql = f'select {select_elm} from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 in ("binary")'
tdSql.query(query_sql)
tdSql.checkRows(5)
tdSql.checkEqual(self.queryLastC10(query_sql), 5) if select_elm == "*" else False
## ts or and nchar col
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c8 like "nchar%"'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" or c8 like "nchar%"'
tdSql.query(query_sql)
tdSql.checkRows(10)
tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 is Null'
query_sql = f'select {select_elm} from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 is Null'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 11)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
## ts or and bool col
query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c9=false'
tdSql.error(query_sql)
query_sql = f'select {select_elm} from {tb_name} where ts < "2021-01-11 12:00:00" or c9=false'
tdSql.query(query_sql)
tdSql.checkRows(6)
tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=true'
query_sql = f'select {select_elm} from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=true'
tdSql.query(query_sql)
tdSql.checkRows(5)
tdSql.checkEqual(self.queryLastC10(query_sql), 9)
tdSql.checkEqual(self.queryLastC10(query_sql), 9) if select_elm == "*" else False
## multi cols
query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and c1 != 2 and c2 >= 2 and c3 <> 4 and c4 < 4 and c5 > 1 and c6 >= 1.1 and c7 is not Null and c8 = "nchar" and c9=false'
query_sql = f'select {select_elm} from {tb_name} where ts > "2021-01-03 12:00:00" and c1 != 2 and c2 >= 2 and c3 <> 4 and c4 < 4 and c5 > 1 and c6 >= 1.1 and c7 is not Null and c8 = "nchar" and c9=false'
tdSql.query(query_sql)
tdSql.checkRows(1)
tdSql.checkEqual(self.queryLastC10(query_sql), 10)
tdSql.checkEqual(self.queryLastC10(query_sql), 10) if select_elm == "*" else False
def queryTsTag(self, tb_name):
## ts and tinyint col
@ -2029,12 +2049,12 @@ class TDTestCase:
tb_name = self.initStb()
self.queryFullTagType(tb_name)
def checkTbTsCol(self):
def checkTbTsCol(self, check_elm):
'''
Ordinary table ts and col check
'''
tb_name = self.initTb()
self.queryTsCol(tb_name)
self.queryTsCol(tb_name, check_elm)
def checkStbTsTol(self):
tb_name = self.initStb()
@ -2112,8 +2132,8 @@ class TDTestCase:
for check_elm in [None, column_name]:
self.checkTbColTypeOperator(check_elm)
self.checkStbColTypeOperator(check_elm)
self.checkTbTsCol(check_elm)
# self.checkStbTagTypeOperator()
# self.checkTbTsCol()
# self.checkStbTsTol()
# self.checkStbTsTag()
# self.checkStbTsColTag()

View File

@ -13,14 +13,12 @@ from util.dnodes import *
class TDTestCase:
hostname = socket.gethostname()
rpcDebugFlagVal = '143'
clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
print ("===================: ", updatecfgDict)
#rpcDebugFlagVal = '143'
#clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
@ -43,27 +41,35 @@ class TDTestCase:
break
return buildPath
def create_tables(self,dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tdSql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tdSql.execute("use %s" %dbName)
tdSql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
def newcur(self,cfg,host,port):
user = "root"
password = "taosdata"
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
for i in range(ctbNum):
sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
if (i > 0) and (i%100 == 0):
tdSql.execute(sql)
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tdSql.execute(sql)
tsql.execute(sql)
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
def insert_data(self,dbName,stbName,ctbNum,rowsPerTbl,startTs):
def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
tdSql.execute("use %s" %dbName)
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert
@ -72,33 +78,389 @@ class TDTestCase:
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
if (j > 0) and (j%2000 == 0):
tdSql.execute(sql)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
# print(sql)
print("sql:%s"%sql)
tdSql.execute(sql)
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
self.create_tables(parameterDict["dbName"],\
# create a new connection for this thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
self.create_tables(tsql,\
parameterDict["dbName"],\
parameterDict["vgroups"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"])
self.insert_data(parameterDict["dbName"],\
self.insert_data(tsql,\
parameterDict["dbName"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
parameterDict["startTs"])
return
def tmqCase1(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 1: Produce while consume")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 1000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
time.sleep(2)
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column'
topicFromCtb = 'topic_ctb_column'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
#tdSql.checkRows(2)
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)")
tdSql.query("create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)")
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicFromStb
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
countOfStb = tdSql.getData(0, 0)
if countOfStb != 0:
tdLog.info("count from stb: %d"%countOfStb)
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 5
showMsg = 1
showRow = 1
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 1:
break
else:
time.sleep(5)
tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3)))
tdSql.checkData(0 , 1, consumerId)
# multiple rows and multiple tables in one sql, so the number of msgs is not deterministic
#tdSql.checkData(0 , 2, expectmsgcnt)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 2: add child table with consuming ")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db2', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 4:
print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),)
break
else:
time.sleep(1)
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column2'
topicFromCtb = 'topic_ctb_column2'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
rowsOfNewCtb = 1000
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb
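# the expected row count also covers the rowsOfNewCtb rows written into a child table
# that is created only after the consumer has started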
topicList = topicFromStb
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
countOfStb = tdSql.getData(0, 0)
if countOfStb != 0:
tdLog.info("count from stb: %d"%countOfStb)
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 5
showMsg = 1
showRow = 1
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# create new child table and insert data
newCtbName = 'newctb'
tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"]))
startTs = parameterDict["startTs"]
for j in range(rowsOfNewCtb):
sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j)
tdSql.execute(sql)
tdLog.debug("insert data into new child table ............ [OK]")
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 1:
break
else:
time.sleep(5)
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 3: tow topics, each contains a stable, \
but at the beginning, no ctables in the stable of one topic,\
after starting consumer, create ctables ")
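# NOTE: the body below currently mirrors tmqCase2 (same database and topic names),
# and this case is not invoked from run()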
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db2', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 4:
print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),)
break
else:
time.sleep(1)
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column2'
topicFromCtb = 'topic_ctb_column2'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
rowsOfNewCtb = 1000
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb
topicList = topicFromStb
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
countOfStb = tdSql.getData(0, 0)
if countOfStb != 0:
tdLog.info("count from stb: %d"%countOfStb)
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 5
showMsg = 1
showRow = 1
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# create new child table and insert data
newCtbName = 'newctb'
tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"]))
startTs = parameterDict["startTs"]
for j in range(rowsOfNewCtb):
sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j)
tdSql.execute(sql)
tdLog.debug("insert data into new child table ............ [OK]")
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 1:
break
else:
time.sleep(5)
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
tdLog.printNoPrefix("======== test case 3 end ...... ")
def run(self):
tdSql.prepare()
@ -110,28 +472,9 @@ class TDTestCase:
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
tdLog.printNoPrefix("======== test scenario 1: ")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'dbName': 'db', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
# wait for data ready
prepareEnvThread.join()
tdLog.printNoPrefix("======== test scenario 2: ")
tdLog.printNoPrefix("======== test scenario 3: ")
#os.system('pkill tmq_sim')
self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
#self.tmqCase3(cfgPath, buildPath)
def stop(self):
tdSql.close()

View File

@ -0,0 +1,372 @@
import taos
import sys
import time
import socket
import os
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
class TDTestCase:
hostname = socket.gethostname()
#rpcDebugFlagVal = '143'
#clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
#updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""  # ensure the caller can detect the case where taosd is not found
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
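# open a dedicated connection/cursor so the env-preparation thread does not share
# the global tdSql cursor with the main test thread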
def newcur(self,cfg,host,port):
user = "root"
password = "taosdata"
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
return cur
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
sql = pre_create
#tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
for i in range(ctbNum):
sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
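# build multi-table batched inserts: rows are appended per child table and flushed
# every batchNum rows, keeping each generated SQL statement bounded in size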
def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
tsql.execute("use %s" %dbName)
pre_insert = "insert into "
sql = pre_insert
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
if j < rowsPerTbl - 1:
sql = "insert into %s_%d values " %(stbName,i)
else:
sql = "insert into "
#end sql
if sql != pre_insert:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
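# prepareEnv runs in a worker thread: it creates the database, stable and child tables,
# then inserts the test data through its own connection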
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
self.create_tables(tsql,\
parameterDict["dbName"],\
parameterDict["vgroups"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"])
self.insert_data(tsql,\
parameterDict["dbName"],\
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
parameterDict["startTs"])
return
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
tdLog.printNoPrefix("======== test scenario 1: ")
tdLog.info("step 1: create database, stb, ctb and insert data")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 100, \
'batchNum': 10, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
time.sleep(2)
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column'
topicFromCtb = 'topic_ctb_column'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
#tdSql.checkRows(2)
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
print (topic1)
print (topic2)
print (topicFromStb)
print (topicFromCtb)
#tdLog.info("show topics: %s, %s"%topic1, topic2)
#if topic1 != topicFromStb or topic1 != topicFromCtb:
# tdLog.exit("topic error1")
#if topic2 != topicFromStb or topic2 != topicFromCtb:
# tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)")
tdSql.query("create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)")
consumerId = 0
expectmsgcnt = (parameterDict["rowsPerTbl"] // parameterDict["batchNum"]) * parameterDict["ctbNum"]
expectmsgcnt1 = expectmsgcnt + parameterDict["ctbNum"]
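# expectmsgcnt approximates one consumed message per insert batch (rowsPerTbl/batchNum
# batches per child table); expectmsgcnt1 adds one extra message per child table as
# headroom for the final partial batch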
topicList = topicFromStb
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectmsgcnt1, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
countOfStb = tdSql.getData(0, 0)
if countOfStb != 0:
tdLog.info("count from stb: %d"%countOfStb)
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 5
showMsg = 1
showRow = 1
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 1:
break
else:
time.sleep(5)
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 2, expectmsgcnt)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
# ==============================================================================
tdLog.printNoPrefix("======== test scenario 2: add child table with consuming ")
tdLog.info(" clean database")
# create and start thread
parameterDict = {'cfg': '', \
'dbName': 'db2', \
'vgroups': 1, \
'stbName': 'stb', \
'ctbNum': 10, \
'rowsPerTbl': 10000, \
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 4:
print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),)
break
else:
time.sleep(1)
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column2'
topicFromCtb = 'topic_ctb_column2'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
print (topic1)
print (topic2)
print (topicFromStb)
print (topicFromCtb)
#tdLog.info("show topics: %s, %s"%topic1, topic2)
#if topic1 != topicFromStb or topic1 != topicFromCtb:
# tdLog.exit("topic error1")
#if topic2 != topicFromStb or topic2 != topicFromCtb:
# tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
consumerId = 0
expectmsgcnt = (parameterDict["rowsPerTbl"] // parameterDict["batchNum"]) * parameterDict["ctbNum"]
expectmsgcnt1 = expectmsgcnt + parameterDict["ctbNum"]
topicList = topicFromStb
ifcheckdata = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:false,\
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectmsgcnt1, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
countOfStb = tdSql.getData(0, 0)
if countOfStb != 0:
tdLog.info("count from stb: %d"%countOfStb)
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 5
showMsg = 1
showRow = 1
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# create new child table and insert data
newCtbName = 'newctb'
rowsOfNewCtb = 1000
tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"]))
startTs = parameterDict["startTs"]
for j in range(rowsOfNewCtb):
sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j)
tdSql.execute(sql)
tdLog.debug("insert data into new child table ............ [OK]")
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 1:
break
else:
time.sleep(5)
expectmsgcnt += rowsOfNewCtb
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb
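# every row of the new child table was inserted with its own statement, so both the
# expected message count and the expected row count grow by rowsOfNewCtb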
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 2, expectmsgcnt)
tdSql.checkData(0 , 3, expectrowcnt)
# ==============================================================================
tdLog.printNoPrefix("======== test scenario 3: ")
#os.system('pkill tmq_sim')
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -51,3 +51,7 @@ python3 ./test.py -f 2-query/arcsin.py
python3 ./test.py -f 2-query/arccos.py
python3 ./test.py -f 2-query/arctan.py
# python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 7-tmq/basic5.py

Some files were not shown because too many files have changed in this diff