Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/tsdb_snapshot
This commit is contained in: commit 35fb7bbd31
@@ -128,10 +128,12 @@ typedef struct SVnodeModifyLogicNode {
  SVgDataBlocks* pVgDataBlocks;
  SNode*         pAffectedRows;  // SColumnNode
  uint64_t       tableId;
  uint64_t       stableId;
  int8_t         tableType;  // table type
  char           tableFName[TSDB_TABLE_FNAME_LEN];
  STimeWindow    deleteTimeRange;
  SVgroupsInfo*  pVgroupList;
  SNodeList*     pInsertCols;
} SVnodeModifyLogicNode;

typedef struct SExchangeLogicNode {

@@ -459,7 +461,9 @@ typedef struct SDataInserterNode {

typedef struct SQueryInserterNode {
  SDataSinkNode sink;
  SNodeList*    pCols;
  uint64_t      tableId;
  uint64_t      stableId;
  int8_t        tableType;  // table type
  char          tableFName[TSDB_TABLE_FNAME_LEN];
  int32_t       vgId;
@@ -215,6 +215,7 @@ int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, in
bool        syncEnvIsStart();
const char* syncStr(ESyncState state);
bool        syncIsRestoreFinish(int64_t rid);
int32_t     syncGetSnapshotByIndex(int64_t rid, SyncIndex index, SSnapshot* pSnapshot);

int32_t     syncReconfig(int64_t rid, const SSyncCfg* pNewCfg);
@@ -35,6 +35,7 @@ typedef struct {
  uint32_t clientIp;
  uint16_t clientPort;
  int64_t  applyIndex;
  uint64_t applyTerm;
  char     user[TSDB_USER_LEN];
} SRpcConnInfo;

@@ -234,17 +234,14 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
  }

#if 1
  do {
    char          *syncNodeStr = sync2SimpleStr(pVnode->sync);
    static int64_t vndTick = 0;
    if (++vndTick % 10 == 1) {
      vGTrace("vgId:%d, sync trace msg:%s, %s", syncGetVgId(pVnode->sync), TMSG_INFO(pMsg->msgType), syncNodeStr);
    }
    if (gRaftDetailLog) {
      char logBuf[512] = {0};
      snprintf(logBuf, sizeof(logBuf), "vnode process syncmsg, msgType:%d, syncNode:%s", pMsg->msgType, syncNodeStr);
      syncRpcMsgLog2(logBuf, pMsg);
    }
    taosMemoryFree(syncNodeStr);
  } while (0);
#endif

  if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_NO_SNAPSHOT) {

@@ -297,7 +294,8 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
        vGError("vgId:%d, msg:%p failed to process since error msg type:%d", pVnode->config.vgId, pMsg->msgType);
        code = -1;
      }
    } else {

  } else if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_WAL_FIRST) {
    // use wal first strategy
    if (pMsg->msgType == TDMT_SYNC_TIMEOUT) {
      SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pMsg);

@@ -404,42 +402,52 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon

static void vnodeSyncCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  SVnode   *pVnode = pFsm->data;
  SSnapshot snapshot = {0};
  SyncIndex beginIndex = SYNC_INDEX_INVALID;
  char      logBuf[256] = {0};

  snprintf(logBuf, sizeof(logBuf),
           "commitCb execute, pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, beginIndex :%ld\n", pFsm,
           cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state), beginIndex);
  syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
  vTrace("vgId:%d, commit-cb is excuted, fsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, msgtype:%d %s",
         syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state,
         syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType));

  SRpcMsg rpcMsg = {.msgType = pMsg->msgType, .contLen = pMsg->contLen};
  rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen);
  memcpy(rpcMsg.pCont, pMsg->pCont, pMsg->contLen);
  syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info);
  rpcMsg.info.conn.applyIndex = cbMeta.index;
  rpcMsg.info.conn.applyTerm = cbMeta.term;
  tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, &rpcMsg);
}

static void vnodeSyncPreCommitMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  char logBuf[256] = {0};
  snprintf(logBuf, sizeof(logBuf), "preCommitCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm,
           cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
  syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
  SVnode *pVnode = pFsm->data;
  vTrace("vgId:%d, pre-commit-cb is excuted, fsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, msgtype:%d %s",
         syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state,
         syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType));
}

static void vnodeSyncRollBackMsg(SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  char logBuf[256] = {0};
  snprintf(logBuf, sizeof(logBuf), "rollBackCb== pFsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s \n", pFsm,
           cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state, syncUtilState2String(cbMeta.state));
  syncRpcMsgLog2(logBuf, (SRpcMsg *)pMsg);
  SVnode *pVnode = pFsm->data;
  vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%ld, isWeak:%d, code:%d, state:%d %s, msgtype:%d %s",
         syncGetVgId(pVnode->sync), pFsm, cbMeta.index, cbMeta.isWeak, cbMeta.code, cbMeta.state,
         syncUtilState2String(cbMeta.state), pMsg->msgType, TMSG_INFO(pMsg->msgType));
}

static int32_t vnodeSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) { return 0; }
static int32_t vnodeSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) {
  SVnode         *pVnode = pFsm->data;
  SSnapshotParam *pSnapshotParam = pParam;
  int32_t         code =
      vnodeSnapshotReaderOpen(pVnode, (SVSnapshotReader **)ppReader, pSnapshotParam->start, pSnapshotParam->end);
  return code;
}

static int32_t vnodeSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) { return 0; }
static int32_t vnodeSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
  SVnode *pVnode = pFsm->data;
  int32_t code = vnodeSnapshotReaderClose(pReader);
  return code;
}

static int32_t vnodeSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) { return 0; }
static int32_t vnodeSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
  SVnode *pVnode = pFsm->data;
  int32_t code = vnodeSnapshotRead(pReader, (const void **)ppBuf, len);
  return code;
}

static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void **ppWriter) { return 0; }

@@ -394,10 +394,12 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi
  COPY_SCALAR_FIELD(msgType);
  CLONE_NODE_FIELD(pAffectedRows);
  COPY_SCALAR_FIELD(tableId);
  COPY_SCALAR_FIELD(stableId);
  COPY_SCALAR_FIELD(tableType);
  COPY_CHAR_ARRAY_FIELD(tableFName);
  COPY_OBJECT_FIELD(deleteTimeRange, sizeof(STimeWindow));
  CLONE_OBJECT_FIELD(pVgroupList, vgroupsInfoClone);
  CLONE_NODE_LIST_FIELD(pInsertCols);
  return TSDB_CODE_SUCCESS;
}

@@ -2214,6 +2214,8 @@ static int32_t physiDispatchNodeToJson(const void* pObj, SJson* pJson) { return

static int32_t jsonToPhysiDispatchNode(const SJson* pJson, void* pObj) { return jsonToPhysicDataSinkNode(pJson, pObj); }

static const char* jkQueryInsertPhysiPlanInsertCols = "InsertCols";
static const char* jkQueryInsertPhysiPlanStableId = "StableId";
static const char* jkQueryInsertPhysiPlanTableId = "TableId";
static const char* jkQueryInsertPhysiPlanTableType = "TableType";
static const char* jkQueryInsertPhysiPlanTableFName = "TableFName";

@@ -2224,6 +2226,12 @@ static int32_t physiQueryInsertNodeToJson(const void* pObj, SJson* pJson) {
  const SQueryInserterNode* pNode = (const SQueryInserterNode*)pObj;

  int32_t code = physicDataSinkNodeToJson(pObj, pJson);
  if (TSDB_CODE_SUCCESS == code) {
    code = nodeListToJson(pJson, jkQueryInsertPhysiPlanInsertCols, pNode->pCols);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanStableId, pNode->stableId);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanTableId, pNode->tableId);
  }

@@ -2247,6 +2255,12 @@ static int32_t jsonToPhysiQueryInsertNode(const SJson* pJson, void* pObj) {
  SQueryInserterNode* pNode = (SQueryInserterNode*)pObj;

  int32_t code = jsonToPhysicDataSinkNode(pJson, pObj);
  if (TSDB_CODE_SUCCESS == code) {
    code = jsonToNodeList(pJson, jkQueryInsertPhysiPlanInsertCols, &pNode->pCols);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetUBigIntValue(pJson, jkQueryInsertPhysiPlanStableId, &pNode->stableId);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetUBigIntValue(pJson, jkQueryInsertPhysiPlanTableId, &pNode->tableId);
  }
@@ -1681,5 +1681,10 @@ SNode* createInsertStmt(SAstCreateContext* pCxt, SNode* pTable, SNodeList* pCols
  pStmt->pTable = pTable;
  pStmt->pCols = pCols;
  pStmt->pQuery = pQuery;
  if (QUERY_NODE_SELECT_STMT == nodeType(pQuery)) {
    strcpy(((SSelectStmt*)pQuery)->stmtName, ((STableNode*)pTable)->tableAlias);
  } else if (QUERY_NODE_SET_OPERATOR == nodeType(pQuery)) {
    strcpy(((SSetOperator*)pQuery)->stmtName, ((STableNode*)pTable)->tableAlias);
  }
  return (SNode*)pStmt;
}
@@ -2839,17 +2839,87 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) {
  return code;
}

static int32_t translateInsertCols(STranslateContext* pCxt, SInsertStmt* pInsert) {
  if (NULL == pInsert->pCols) {
    return createAllColumns(pCxt, false, &pInsert->pCols);
  }
  return translateExprList(pCxt, pInsert->pCols);
}

static int32_t translateInsertQuery(STranslateContext* pCxt, SInsertStmt* pInsert) {
  int32_t code = resetTranslateNamespace(pCxt);
  if (TSDB_CODE_SUCCESS == code) {
    code = translateQuery(pCxt, pInsert->pQuery);
  }
  return code;
}

static int32_t addOrderByPrimaryKeyToQueryImpl(STranslateContext* pCxt, SNode* pPrimaryKeyExpr,
                                               SNodeList** pOrderByList) {
  SOrderByExprNode* pOrderByExpr = (SOrderByExprNode*)nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR);
  if (NULL == pOrderByExpr) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  pOrderByExpr->nullOrder = NULL_ORDER_FIRST;
  pOrderByExpr->order = ORDER_ASC;
  pOrderByExpr->pExpr = nodesCloneNode(pPrimaryKeyExpr);
  if (NULL == pOrderByExpr->pExpr) {
    nodesDestroyNode((SNode*)pOrderByExpr);
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  ((SExprNode*)pOrderByExpr->pExpr)->orderAlias = true;
  NODES_DESTORY_LIST(*pOrderByList);
  return nodesListMakeStrictAppend(pOrderByList, (SNode*)pOrderByExpr);
}

static int32_t addOrderByPrimaryKeyToQuery(STranslateContext* pCxt, SNode* pPrimaryKeyExpr, SNode* pStmt) {
  if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
    return addOrderByPrimaryKeyToQueryImpl(pCxt, pPrimaryKeyExpr, &((SSelectStmt*)pStmt)->pOrderByList);
  }
  return addOrderByPrimaryKeyToQueryImpl(pCxt, pPrimaryKeyExpr, &((SSetOperator*)pStmt)->pOrderByList);
}

static int32_t translateInsertProject(STranslateContext* pCxt, SInsertStmt* pInsert) {
  SNodeList* pProjects = getProjectList(pInsert->pQuery);
  if (LIST_LENGTH(pInsert->pCols) != LIST_LENGTH(pProjects)) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of columns");
  }

  SNode* pPrimaryKeyExpr = NULL;
  SNode* pBoundCol = NULL;
  SNode* pProj = NULL;
  FORBOTH(pBoundCol, pInsert->pCols, pProj, pProjects) {
    SColumnNode* pCol = (SColumnNode*)pBoundCol;
    SExprNode*   pExpr = (SExprNode*)pProj;
    if (!dataTypeEqual(&pCol->node.resType, &pExpr->resType)) {
      SNode*  pFunc = NULL;
      int32_t code = createCastFunc(pCxt, pProj, pCol->node.resType, &pFunc);
      if (TSDB_CODE_SUCCESS != code) {
        return code;
      }
      REPLACE_LIST2_NODE(pFunc);
      pExpr = (SExprNode*)pFunc;
    }
    snprintf(pExpr->aliasName, sizeof(pExpr->aliasName), "%s", pCol->colName);
    if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId) {
      pPrimaryKeyExpr = pProj;
    }
  }

  return addOrderByPrimaryKeyToQuery(pCxt, pPrimaryKeyExpr, pInsert->pQuery);
}

static int32_t translateInsert(STranslateContext* pCxt, SInsertStmt* pInsert) {
  pCxt->pCurrStmt = (SNode*)pInsert;
  int32_t code = translateFrom(pCxt, pInsert->pTable);
  if (TSDB_CODE_SUCCESS == code) {
    code = translateExprList(pCxt, pInsert->pCols);
    code = translateInsertCols(pCxt, pInsert);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = resetTranslateNamespace(pCxt);
    code = translateInsertQuery(pCxt, pInsert);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = translateQuery(pCxt, pInsert->pQuery);
    code = translateInsertProject(pCxt, pInsert);
  }
  return code;
}
@@ -1281,10 +1281,16 @@ static int32_t createVnodeModifLogicNodeByInsert(SLogicPlanContext* pCxt, SInser

  pModify->modifyType = MODIFY_TABLE_TYPE_INSERT;
  pModify->tableId = pRealTable->pMeta->uid;
  pModify->stableId = pRealTable->pMeta->suid;
  pModify->tableType = pRealTable->pMeta->tableType;
  snprintf(pModify->tableFName, sizeof(pModify->tableFName), "%d.%s.%s", pCxt->pPlanCxt->acctId,
           pRealTable->table.dbName, pRealTable->table.tableName);
  TSWAP(pModify->pVgroupList, pRealTable->pVgroupList);
  pModify->pInsertCols = nodesCloneList(pInsert->pCols);
  if (NULL == pModify->pInsertCols) {
    nodesDestroyNode((SNode*)pModify);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  *pLogicNode = (SLogicNode*)pModify;
  return TSDB_CODE_SUCCESS;
@@ -1510,19 +1510,22 @@ static int32_t createQueryInserter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNod
  }

  pInserter->tableId = pModify->tableId;
  pInserter->stableId = pModify->stableId;
  pInserter->tableType = pModify->tableType;
  strcpy(pInserter->tableFName, pModify->tableFName);
  pInserter->vgId = pModify->pVgroupList->vgroups[0].vgId;
  pInserter->epSet = pModify->pVgroupList->vgroups[0].epSet;
  vgroupInfoToNodeAddr(pModify->pVgroupList->vgroups, &pSubplan->execNode);

  int32_t code = TSDB_CODE_SUCCESS;

  int32_t code = setListSlotId(pCxt, pSubplan->pNode->pOutputDataBlockDesc->dataBlockId, -1, pModify->pInsertCols,
                               &pInserter->pCols);
  if (TSDB_CODE_SUCCESS == code) {
    pInserter->sink.pInputDataBlockDesc =
        (SDataBlockDescNode*)nodesCloneNode((SNode*)pSubplan->pNode->pOutputDataBlockDesc);
    if (NULL == pInserter->sink.pInputDataBlockDesc) {
      code = TSDB_CODE_OUT_OF_MEMORY;
    }
  }

  if (TSDB_CODE_SUCCESS == code) {
    *pSink = (SDataSinkNode*)pInserter;

@@ -1530,7 +1533,7 @@ static int32_t createQueryInserter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNod
    nodesDestroyNode((SNode*)pInserter);
  }

  return TSDB_CODE_SUCCESS;
  return code;
}

static int32_t buildInsertSelectSubplan(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode* pModify, SSubplan* pSubplan) {
@@ -96,4 +96,8 @@ TEST_F(PlanOtherTest, insert) {
  useDb("root", "test");

  run("INSERT INTO t1 SELECT * FROM t1");

  run("INSERT INTO t1 (ts, c1, c2) SELECT ts, c1, c2 FROM st1");

  run("INSERT INTO t1 (ts, c1, c2) SELECT ts, c1, c2 FROM st1s1 UNION ALL SELECT ts, c1, c2 FROM st2");
}
@@ -36,6 +36,7 @@ typedef struct SRaftCfg {
  TdFilePtr pFile;
  char      path[TSDB_FILENAME_LEN * 2];
  int8_t    isStandBy;
  int32_t   batchSize;
  int8_t    snapshotStrategy;
  SyncIndex lastConfigIndex;

@@ -62,6 +63,7 @@ int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg);

typedef struct SRaftCfgMeta {
  int8_t    isStandBy;
  int32_t   batchSize;
  int8_t    snapshotStrategy;
  SyncIndex lastConfigIndex;
} SRaftCfgMeta;
@@ -834,7 +834,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
  //
  // operation:
  // if hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex, append entry
  // match my-commit-index or my-commit-index + 1
  // match my-commit-index or my-commit-index + batchSize
  do {
    bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
                     (pMsg->prevLogIndex <= ths->commitIndex);

@@ -928,11 +928,13 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
    bool condition = condition1 || condition2;

    if (condition) {
      do {
        char logBuf[128];
        snprintf(logBuf, sizeof(logBuf),
                 "recv sync-append-entries-batch, not match, pre-index:%ld, pre-term:%lu, datalen:%d", pMsg->prevLogIndex,
                 pMsg->prevLogTerm, pMsg->dataLen);
                 "recv sync-append-entries-batch, not match, pre-index:%ld, pre-term:%lu, datalen:%d",
                 pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
        syncNodeEventLog(ths, logBuf);
      } while (0);

      // prepare response msg
      SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
@@ -109,19 +109,30 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
}

// only start once
static void syncNodeStartSnapshot(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, SyncTerm lastApplyTerm,
static void syncNodeStartSnapshotOnce(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, SyncTerm lastApplyTerm,
                                      SyncAppendEntriesReply* pMsg) {
  // get sender
  SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId));
  ASSERT(pSender != NULL);

  if (snapshotSenderIsStart(pSender)) {
    do {
      char* eventLog = snapshotSender2SimpleStr(pSender, "snapshot sender already start");
      syncNodeErrorLog(ths, eventLog);
      taosMemoryFree(eventLog);
    } while (0);

    return;
  }

  SSnapshot snapshot = {
      .data = NULL, .lastApplyIndex = endIndex, .lastApplyTerm = lastApplyTerm, .lastConfigIndex = SYNC_INDEX_INVALID};

  void*          pReader = NULL;
  SSnapshotParam readerParam = {.start = beginIndex, .end = endIndex};
  ths->pFsm->FpSnapshotStartRead(ths->pFsm, &readerParam, &pReader);
  if (!snapshotSenderIsStart(pSender) && pMsg->privateTerm < pSender->privateTerm) {
  int32_t code = ths->pFsm->FpSnapshotStartRead(ths->pFsm, &readerParam, &pReader);
  ASSERT(code == 0);

  if (pMsg->privateTerm < pSender->privateTerm) {
    ASSERT(pReader != NULL);
    snapshotSenderStart(pSender, readerParam, snapshot, pReader);

@@ -178,7 +189,9 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
      // start snapshot <match+1, old snapshot.end>
      SSnapshot oldSnapshot;
      ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &oldSnapshot);
      syncNodeStartSnapshot(ths, newMatchIndex + 1, oldSnapshot.lastApplyIndex, oldSnapshot.lastApplyTerm, pMsg);
      ASSERT(oldSnapshot.lastApplyIndex >= newMatchIndex + 1);
      syncNodeStartSnapshotOnce(ths, newMatchIndex + 1, oldSnapshot.lastApplyIndex, oldSnapshot.lastApplyTerm,
                                pMsg);  // term maybe not ok?

      syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), oldSnapshot.lastApplyIndex + 1);
      syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), newMatchIndex);

@@ -187,7 +200,6 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
    } else {
      SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));

      // notice! int64, uint64
      if (nextIndex > SYNC_INDEX_BEGIN) {
        --nextIndex;

@@ -198,7 +210,7 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
        SSyncRaftEntry* pEntry;
        int32_t         code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, nextIndex, &pEntry);
        ASSERT(code == 0);
        syncNodeStartSnapshot(ths, SYNC_INDEX_BEGIN, nextIndex, pEntry->term, pMsg);
        syncNodeStartSnapshotOnce(ths, SYNC_INDEX_BEGIN, nextIndex, pEntry->term, pMsg);

        // get sender
        SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId));
@@ -397,6 +397,38 @@ bool syncIsRestoreFinish(int64_t rid) {
  return b;
}

int32_t syncGetSnapshotByIndex(int64_t rid, SyncIndex index, SSnapshot* pSnapshot) {
  if (index < SYNC_INDEX_BEGIN) {
    return -1;
  }

  SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
  if (pSyncNode == NULL) {
    return -1;
  }
  ASSERT(rid == pSyncNode->rid);

  SSyncRaftEntry* pEntry = NULL;
  int32_t         code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, index, &pEntry);
  if (code != 0) {
    if (pEntry != NULL) {
      syncEntryDestory(pEntry);
    }
    taosReleaseRef(tsNodeRefId, pSyncNode->rid);
    return -1;
  }
  ASSERT(pEntry != NULL);

  pSnapshot->data = NULL;
  pSnapshot->lastApplyIndex = index;
  pSnapshot->lastApplyTerm = pEntry->term;
  pSnapshot->lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, index);

  syncEntryDestory(pEntry);
  taosReleaseRef(tsNodeRefId, pSyncNode->rid);
  return 0;
}

int32_t syncGetSnapshotMeta(int64_t rid, struct SSnapshotMeta* sMeta) {
  SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
  if (pSyncNode == NULL) {

@@ -786,6 +818,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
      int32_t code = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg, &retIndex);
      if (code == 0) {
        pMsg->info.conn.applyIndex = retIndex;
        pMsg->info.conn.applyTerm = pSyncNode->pRaftStore->currentTerm;
        rpcFreeCont(rpcMsg.pCont);
        syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum);
        ret = 1;

@@ -846,6 +879,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
    meta.isStandBy = pSyncInfo->isStandBy;
    meta.snapshotStrategy = pSyncInfo->snapshotStrategy;
    meta.lastConfigIndex = SYNC_INDEX_INVALID;
    meta.batchSize = pSyncInfo->batchSize;
    ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), meta, pSyncNode->configPath);
    ASSERT(ret == 0);

@@ -183,6 +183,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
  cJSON_AddItemToObject(pRoot, "SSyncCfg", syncCfg2Json(&(pRaftCfg->cfg)));
  cJSON_AddNumberToObject(pRoot, "isStandBy", pRaftCfg->isStandBy);
  cJSON_AddNumberToObject(pRoot, "snapshotStrategy", pRaftCfg->snapshotStrategy);
  cJSON_AddNumberToObject(pRoot, "batchSize", pRaftCfg->batchSize);

  char buf64[128];
  snprintf(buf64, sizeof(buf64), "%ld", pRaftCfg->lastConfigIndex);

@@ -228,6 +229,7 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, SRaftCfgMeta meta, const char *path) {
  SRaftCfg raftCfg;
  raftCfg.cfg = *pCfg;
  raftCfg.isStandBy = meta.isStandBy;
  raftCfg.batchSize = meta.batchSize;
  raftCfg.snapshotStrategy = meta.snapshotStrategy;
  raftCfg.lastConfigIndex = meta.lastConfigIndex;
  raftCfg.configIndexCount = 1;

@@ -257,6 +259,9 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
  cJSON *pJsonIsStandBy = cJSON_GetObjectItem(pJson, "isStandBy");
  pRaftCfg->isStandBy = cJSON_GetNumberValue(pJsonIsStandBy);

  cJSON *pJsonBatchSize = cJSON_GetObjectItem(pJson, "batchSize");
  pRaftCfg->batchSize = cJSON_GetNumberValue(pJsonBatchSize);

  cJSON *pJsonSnapshotStrategy = cJSON_GetObjectItem(pJson, "snapshotStrategy");
  pRaftCfg->snapshotStrategy = cJSON_GetNumberValue(pJsonSnapshotStrategy);

@@ -8,7 +8,6 @@ void print(SHashObj *pNextIndex) {
  printf("----------------\n");
  uint64_t *p = (uint64_t *)taosHashIterate(pNextIndex, NULL);
  while (p) {

    size_t len;
    void * key = taosHashGetKey(p, &len);

@@ -26,6 +26,7 @@ SRaftCfg* createRaftCfg() {
    snprintf(((pCfg->cfg.nodeInfo)[i]).nodeFqdn, sizeof(((pCfg->cfg.nodeInfo)[i]).nodeFqdn), "100.200.300.%d", i);
  }
  pCfg->isStandBy = taosGetTimestampSec() % 100;
  pCfg->batchSize = taosGetTimestampSec() % 100;

  pCfg->configIndexCount = 5;
  for (int i = 0; i < MAX_CONFIG_INDEX_COUNT; ++i) {

@@ -84,6 +85,7 @@ void test3() {
  SRaftCfgMeta meta;
  meta.isStandBy = 7;
  meta.snapshotStrategy = 9;
  meta.batchSize = 10;
  meta.lastConfigIndex = 789;
  raftCfgCreateFile(pCfg, meta, s);
  printf("%s create json file: %s \n", (char*)__FUNCTION__, s);

@@ -109,6 +111,7 @@ void test5() {
  pCfg->cfg.myIndex = taosGetTimestampSec();
  pCfg->isStandBy += 2;
  pCfg->snapshotStrategy += 3;
  pCfg->batchSize += 4;
  pCfg->lastConfigIndex += 1000;

  pCfg->configIndexCount = 5;
@@ -0,0 +1,545 @@
import taos
import sys
import datetime
import inspect

from util.log import *
from util.sql import *
from util.cases import *
import random


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}

    def init(self, conn, logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), True)
        self.tb_nums = 10
        self.row_nums = 20
        self.ts = 1434938400000
        self.time_step = 1000

    def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ):
        tdLog.info(" prepare datas for auto check abs function ")

        tdSql.execute(" create database test ")
        tdSql.execute(" use test ")
        tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
             c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
        for tbnum in range(tbnums):
            tbname = "sub_tb_%d"%tbnum
            tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))

            ts = self.ts
            for row in range(rownums):
                ts = self.ts + time_step*row
                c1 = random.randint(0,10000)
                c2 = random.randint(0,100000)
                c3 = random.randint(0,125)
                c4 = random.randint(0,125)
                c5 = random.random()/1.0
                c6 = random.random()/1.0
                c7 = "'true'"
                c8 = "'binary_val'"
                c9 = "'nchar_val'"
                c10 = ts
                tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")

        tdSql.execute("use test")
        tbnames = ["stb", "sub_tb_1"]
        support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
        for tbname in tbnames:
            tdSql.query("desc {}".format(tbname))
            coltypes = tdSql.queryResult
            colnames = []
            for coltype in coltypes:
                colname = coltype[0]
                if coltype[1] in support_types:
                    colnames.append(colname)
            cols = random.sample(colnames,3)
            self.check_function("&",False,tbname,cols[0],cols[1],cols[2])
            self.check_function("|",False,tbname,cols[0],cols[1],cols[2])

    def prepare_datas(self):
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
        tdSql.execute(
            "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute(
            "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute(
            "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute(
            "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute(
            "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute(
            "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute(
            "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

    def prepare_tag_datas(self):
        # prepare datas
        tdSql.execute(
            "create database if not exists testdb keep 3650 duration 1000")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(4):
            tdSql.execute(
                f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
        tdSql.execute(
            "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute(
            "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute(
            "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute(
            "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute(
            "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute(
            "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute(
            "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

    def check_result_auto(self, origin_query, abs_query):
        abs_result = tdSql.getResult(abs_query)
        origin_result = tdSql.getResult(origin_query)

        auto_result = []

        for row in origin_result:
            row_check = []
            for elem in row:
                if elem == None:
                    elem = None
                elif elem >= 0:
                    elem = elem
                else:
                    elem = -elem
                row_check.append(elem)
            auto_result.append(row_check)

        check_status = True
        for row_index, row in enumerate(abs_result):
            for col_index, elem in enumerate(row):
                if auto_result[row_index][col_index] != elem:
                    check_status = False
        if not check_status:
            tdLog.notice(
                "abs function value has not as expected , sql is \"%s\" " % abs_query)
            sys.exit(1)
        else:
            tdLog.info(
                "abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)

    def check_function(self, opera ,agg, tbname , *args):

        if opera =="&":
            pass
        elif opera =="|":
            pass
        else:
            pass
        work_sql = " select "
        for ind , arg in enumerate(args):
            if ind ==len(args)-1:
                work_sql += f"cast({arg} as bigint) "
            else:
                work_sql += f"cast({arg} as bigint){opera}"

        if not agg:
            work_sql+= f" from {tbname} order by ts"
        else:
            work_sql+= f" from {tbname} "
        tdSql.query(work_sql)
        work_result = tdSql.queryResult

        origin_sql = " select "
        for ind , arg in enumerate(args):
            if ind ==len(args)-1:
                origin_sql += f"cast({arg} as bigint) "
            else:
                origin_sql += f"cast({arg} as bigint),"
        if not agg:
            origin_sql+= f" from {tbname} order by ts"
        else:
            origin_sql+= f" from {tbname} "
        tdSql.query(origin_sql)
        origin_result = tdSql.queryResult

        # compute and or with byte in binary data
        compute_result = []

        for row in origin_result:
            if None in row:
                compute_result.append(None)
            else:
                if opera == "&":
                    result = row[0]
                    for elem in row:
                        result = result&elem
                elif opera == "|":
                    result = row[0]
                    for elem in row:
                        result = result|elem
                compute_result.append(result)

        tdSql.query(work_sql)
        for ind , result in enumerate(compute_result):
            tdSql.checkData(ind,0,result)

    def test_errors(self):
        tdSql.execute("use testdb")
        error_sql_lists = [
            "select c1&&c2 from t1",
            "select c1&|c2 from t1",
            "select c1&(c1=c2) from t1",
            "select c1&* from t1",
            "select 123&, from t1",
            "select 123&\" from t1",
            "select c1&- from t1;",
            "select c1&&= from t1)",
            "select c1&! from t1",
            "select c1&@ from stb1",
            "select c1&# from stb1",
            "select c1&$ from stb1",
            "select c1&% from stb1",
            "select c1&() from stb1",
        ]
        for error_sql in error_sql_lists:
            tdSql.error(error_sql)

    def basic_query(self):
        # basic query
        tdSql.query("select c1&c2|c3 from ct1")
        tdSql.checkRows(13)
        tdSql.query("select c1 ,c2&c3, c1&c2&c3 from t1")
        tdSql.checkRows(12)
        tdSql.query("select c1 ,c1&c1&c1|c1 from stb1")
        tdSql.checkRows(25)

        # used for empty table , ct3 is empty
        tdSql.query("select abs(c1)&c2&c3 from ct3")
        tdSql.checkRows(0)
        tdSql.query("select abs(c2&c1&c3) from ct3")
        tdSql.checkRows(0)
        tdSql.query("select abs(c3)+c1&c3+c2 from ct3")
        tdSql.checkRows(0)

        tdSql.query("select abs(c1)&c2&c3 from ct4")
        tdSql.checkRows(12)
        tdSql.checkData(0,0,None)
        tdSql.checkData(1,0,8)
        tdSql.checkData(10,0,0)
        tdSql.query("select abs(c2&c1&c3) from ct4")
        tdSql.checkRows(12)
        tdSql.checkData(0,0,None)
        tdSql.checkData(1,0,8)
        tdSql.checkData(10,0,0)
        tdSql.query("select (abs(c3)+c1)&(c3+c2) from ct4")
        tdSql.checkRows(12)
        tdSql.checkData(0,0,None)
        tdSql.checkData(1,0,640)
        tdSql.checkData(10,0,0)

        # used for regular table
        tdSql.query("select abs(c1)&c3&c3 from t1")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(1, 0, 1)
        tdSql.checkData(3, 0, 1)
        tdSql.checkData(5, 0, None)

        tdSql.query("select abs(c1)&c2|ceil(c3)&c4|floor(c5) from t1")
        tdSql.checkData(1, 0, 11)
        tdSql.checkData(3, 0, 3)
        tdSql.checkData(5, 0, None)
        tdSql.query("select ts,c1, c2, c3&c4|c5 from t1")
        tdSql.checkData(1, 3, 11)
        tdSql.checkData(3, 3, 3)
        tdSql.checkData(5, 3, None)

        self.check_function("&",False,"stb1","c1","ceil(c2)","abs(c3)","c4+1")
        self.check_function("|",False,"stb1","c1","ceil(c2)","abs(c3)","c4+1")
        self.check_function("&",False,"stb1","c1+c2","ceil(c2)","abs(c3+c2)","c4+1")
        self.check_function("&",False,"ct4","123","ceil(c2)","abs(c3+c2)","c4+1")
        self.check_function("&",False,"ct4","123","ceil(t1)","abs(c3+c2)","c4+1")
        self.check_function("&",False,"ct4","t1+c1","-ceil(t1)","abs(c3+c2)","c4+1")
        self.check_function("&",False,"stb1","c1","floor(t1)","abs(c1+c2)","t1+1")
        self.check_function("&",True,"stb1","max(c1)","min(floor(t1))","sum(abs(c1+c2))","last(t1)+1")
        self.check_function("&",False,"stb1","abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))))","floor(t1)","abs(c1+c2)","t1+1")

        # mix with common col
        tdSql.query("select c1&abs(c1)&c2&c3 ,c1,c2, t1 from ct1")
        tdSql.checkData(0, 0, 8)
        tdSql.checkData(1, 0, 1)
        tdSql.checkData(4, 0, 0)
        tdSql.checkData(4, 3, 0)
        tdSql.checkData(3, 2, 55555)

        # mix with common functions
        tdSql.query(" select c1&abs(c1)&c2&c3, abs(c1), c5, floor(c5) from ct4 ")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(0, 1, None)
        tdSql.checkData(0, 2, None)
        tdSql.checkData(0, 3, None)

        tdSql.checkData(3, 0, 2)
        tdSql.checkData(3, 1, 6)
        tdSql.checkData(3, 2, 6.66000)
        tdSql.checkData(3, 3, 6.00000)

        tdSql.query("select c1&abs(c1)&c2&c3, abs(c1),c5, floor(c5) from stb1 order by ts ")
        tdSql.checkData(3, 0, 2)
        tdSql.checkData(3, 1, 6)
        tdSql.checkData(3, 2, 6.66000)
        tdSql.checkData(3, 3, 6.00000)

        # mix with agg functions , not support
        tdSql.error("select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from stb1 ")
        tdSql.error("select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from ct1 ")
        tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from stb1 ")
        tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from ct1 ")
        tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from ct1 ")
        tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from stb1 ")

        # agg functions mix with agg functions

        tdSql.query("select sum(c1&abs(c1)&c2&c3) ,max(c5), count(c5) from stb1")

        tdSql.query("select max(c1)&max(c2)|first(ts), count(c5) from ct1")

        # bug fix for compute
        tdSql.query("select c1&abs(c1)&c2&c3, abs(c1&abs(c1)&c2&c3) -0 ,ceil(c1&abs(c1)&c2&c3)-0 from ct4 ")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(0, 1, None)
        tdSql.checkData(0, 2, None)
        tdSql.checkData(1, 0, 8)
        tdSql.checkData(1, 1, 8.000000000)
        tdSql.checkData(1, 2, 8.000000000)

        tdSql.query(" select c1&c2|c3, abs(c1&c2|c3) -0 ,ceil(c1&c2|c3-0.1)-0.1 from ct4")
        tdSql.checkData(0, 0, None)
        tdSql.checkData(0, 1, None)
        tdSql.checkData(0, 2, None)
        tdSql.checkData(1, 0, 888)
        tdSql.checkData(1, 1, 888.000000000)
        tdSql.checkData(1, 2, 894.900000000)

    def check_boundary_values(self):

        tdSql.execute("drop database if exists bound_test")
        tdSql.execute("create database if not exists bound_test")
        time.sleep(3)
        tdSql.execute("use bound_test")
        tdSql.execute(
            "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
        )
        tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
        tdSql.execute(
            f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        tdSql.execute(
            f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        tdSql.execute(
            f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        tdSql.execute(
            f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        tdSql.error(
            f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
        )
        self.check_function("&", False , "sub1_bound" ,"c1","c2","c3","c4","c5","c6" )
        self.check_function("&", False ,"sub1_bound","abs(c1)","abs(c2)","abs(c3)","abs(c4)","abs(c5)","abs(c6)" )
        self.check_function("&", False ,"stb_bound","123","abs(c2)","t1","abs(c4)","abs(c5)","abs(c6)" )

        # check basic elem for table per row
        tdSql.query(
            "select abs(c1) ,abs(c2) , abs(c3) , abs(c4), abs(c5), abs(c6) from sub1_bound ")
        tdSql.checkData(0, 0, 2147483647)
        tdSql.checkData(0, 1, 9223372036854775807)
        tdSql.checkData(0, 2, 32767)
        tdSql.checkData(0, 3, 127)
        tdSql.checkData(0, 4, 339999995214436424907732413799364296704.00000)
        tdSql.checkData(0, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
        tdSql.checkData(1, 0, 2147483647)
        tdSql.checkData(1, 1, 9223372036854775807)
        tdSql.checkData(1, 2, 32767)
        tdSql.checkData(1, 3, 127)
        tdSql.checkData(1, 4, 339999995214436424907732413799364296704.00000)
        tdSql.checkData(1, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
        tdSql.checkData(3, 0, 2147483646)
        tdSql.checkData(3, 1, 9223372036854775806)
        tdSql.checkData(3, 2, 32766)
        tdSql.checkData(3, 3, 126)
        tdSql.checkData(3, 4, 339999995214436424907732413799364296704.00000)
        tdSql.checkData(3, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)

        # check + - * / in functions
        self.check_function("&", False ,"stb_bound","abs(c1+1)","abs(c2)","t1","abs(c3*1)","abs(c5)/2","abs(c6)" )

        tdSql.query(
            "select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from sub1_bound ")
        tdSql.checkData(0, 0, 2147483648.000000000)
        tdSql.checkData(0, 1, 9223372036854775807)
        tdSql.checkData(0, 2, 32767.000000000)
        tdSql.checkData(0, 3, 63.500000000)
        tdSql.checkData(
            0, 4, 169999997607218212453866206899682148352.000000000)
        tdSql.checkData(0, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)

        tdSql.checkData(1, 0, 2147483646.000000000)
        tdSql.checkData(1, 1, 9223372036854775808.000000000)
        tdSql.checkData(1, 2, 32767.000000000)
        tdSql.checkData(1, 3, 63.500000000)
        tdSql.checkData(
            1, 4, 169999997607218212453866206899682148352.000000000)

    def test_tag_compute_for_scalar_function(self):

        tdSql.execute("use testdb")

        self.check_function("&", False ,"ct4","123","abs(c1)","t1","abs(t2)","abs(t3)","abs(t4)","t5")
        self.check_function("&", False ,"ct4","c1+2","abs(t2+2)","t3","abs(t4)","abs(t5)","abs(c1)","t5")

        tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ")
        tdSql.query("select c1 ,t1 from stb1 where t1 =0 ")
        tdSql.checkRows(13)
        self.check_function("&", False ,"t1","c1+2","abs(c2)")
        tdSql.query("select t1 from stb1 where t1 >0 ")
        tdSql.checkRows(3)
        tdSql.query("select t1 from stb1 where t1 =3 ")
        tdSql.checkRows(1)
        # tdSql.query("select sum(t1) from (select c1 ,t1 from stb1)")
        # tdSql.checkData(0,0,61)
        # tdSql.query("select distinct(c1) ,t1 from stb1")
        # tdSql.checkRows(20)
        tdSql.query("select max(c1) , t1&c2&t2 from stb1;")
        tdSql.checkData(0,1,0)

        # tag filter with abs function
        tdSql.query("select t1 from stb1 where abs(t1)=1")
        tdSql.checkRows(1)
        tdSql.query("select t1 from stb1 where abs(c1+t1)=1")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,0)

        tdSql.query(
            "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")

    def support_super_table_test(self):
        tdSql.execute(" use testdb ")
        self.check_function("|", False , "stb1" , "c1","c2","c3","c4" )
        self.check_function("|", False , "stb1" , "c1","c2","abs(c3)","c4","ceil(t1)" )
        self.check_function("&", False , "stb1" , "c1","c2","abs(c3)","floor(c4)","ceil(t1)" )
        self.check_function("&", True , "stb1" , "max(c1)","max(c2)","sum(abs(c3))","max(floor(c4))","min(ceil(t1))" )

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()
        self.prepare_datas()
        self.prepare_tag_datas()
        self.test_errors()
        self.basic_query()
        self.check_boundary_values()
        self.test_tag_compute_for_scalar_function()
        self.support_super_table_test()
        self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -420,11 +420,65 @@ class TDTestCase:
        tdSql.query("select csum(abs(c1))+2 from t1 ")
        tdSql.checkRows(4)

    def csum_support_stable(self):
        tdSql.query(" select csum(1) from stb1 ")
        tdSql.checkRows(70)
        tdSql.query("select csum(c1) from stb1 partition by tbname ")
        tdSql.checkRows(40)
        # tdSql.query("select csum(st1) from stb1 partition by tbname")
        # tdSql.checkRows(70)
        tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(40)
        tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(40)
        tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(40)

        # # bug need fix
        # tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ")
        # tdSql.checkRows(4)
        # tdSql.error("select csum(st1+c1) from stb1 partition by tbname limit 1 ")

        # bug need fix
        tdSql.query("select csum(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(40)

        # bug need fix
        # tdSql.query("select tbname , csum(c1) from stb1 partition by tbname")
        # tdSql.checkRows(40)
        # tdSql.query("select tbname , csum(st1) from stb1 partition by tbname")
        # tdSql.checkRows(70)
        # tdSql.query("select tbname , csum(st1) from stb1 partition by tbname slimit 1")
        # tdSql.checkRows(7)

        # partition by tags
        # tdSql.query("select st1 , csum(c1) from stb1 partition by st1")
        # tdSql.checkRows(40)
        # tdSql.query("select csum(c1) from stb1 partition by st1")
        # tdSql.checkRows(40)
        # tdSql.query("select st1 , csum(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(4)
        # tdSql.query("select csum(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(4)

        # partition by col
        # tdSql.query("select c1 , csum(c1) from stb1 partition by c1")
        # tdSql.checkRows(41)
        # tdSql.query("select csum(c1) from stb1 partition by c1")
        # tdSql.checkRows(41)
        # tdSql.query("select c1 , csum(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(4)
        # tdSql.query("select csum(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(4)

    def run(self):
        import traceback
        try:
            # run in develop branch
            self.csum_test_run()
            self.csum_support_stable()
            pass
        except Exception as e:
            traceback.print_exc()
File diff suppressed because it is too large
@@ -355,9 +355,63 @@ class TDTestCase:
            tdSql.execute(f"create table tt{i} using stb2 tags({i})")

        pass

    def diff_support_stable(self):
        tdSql.query(" select diff(1) from stb1 ")
        tdSql.checkRows(229)
        tdSql.checkData(0,0,0)
        tdSql.query("select diff(c1) from stb1 partition by tbname ")
        tdSql.checkRows(199)
        # tdSql.query("select diff(st1) from stb1 partition by tbname")
        # tdSql.checkRows(229)
        tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(199)
        tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(199)
        tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(199)

        # # bug need fix
        # tdSql.query("select diff(st1+c1) from stb1 partition by tbname slimit 1 ")
        # tdSql.checkRows(19)
        # tdSql.error("select diff(st1+c1) from stb1 partition by tbname limit 1 ")

        # bug need fix
        tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
        tdSql.checkRows(199)

        # bug need fix
        # tdSql.query("select tbname , diff(c1) from stb1 partition by tbname")
        # tdSql.checkRows(199)
        # tdSql.query("select tbname , diff(st1) from stb1 partition by tbname")
        # tdSql.checkRows(199)
        # tdSql.query("select tbname , diff(st1) from stb1 partition by tbname slimit 1")
        # tdSql.checkRows(19)

        # partition by tags
        # tdSql.query("select st1 , diff(c1) from stb1 partition by st1")
        # tdSql.checkRows(199)
        # tdSql.query("select diff(c1) from stb1 partition by st1")
        # tdSql.checkRows(199)
        # tdSql.query("select st1 , diff(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(19)
        # tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(19)

        # partition by col
        # tdSql.query("select c1 , diff(c1) from stb1 partition by c1")
        # tdSql.checkRows(199)
        # tdSql.query("select diff(c1) from stb1 partition by c1")
        # tdSql.checkRows(41)
        # tdSql.query("select c1 , diff(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(19)
        # tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(19)

    def diff_test_run(self) :
        tdLog.printNoPrefix("==========TD-10594==========")
        tdLog.printNoPrefix("==========run test case for diff function==========")
        tbnum = 10
        nowtime = int(round(time.time() * 1000))
        per_table_rows = 10
@@ -422,6 +476,7 @@ class TDTestCase:
        try:
            # run in develop branch
            self.diff_test_run()
            self.diff_support_stable()
            pass
        except Exception as e:
            traceback.print_exc()
@@ -673,11 +673,65 @@ class TDTestCase:
        tdSql.query("select mavg(abs(c1),1) from t1")
        tdSql.checkRows(4)

    def mavg_support_stable(self):
        tdSql.query(" select mavg(1,3) from stb1 ")
        tdSql.checkRows(68)
        tdSql.checkData(0,0,1.000000000)
        tdSql.query("select mavg(c1,3) from stb1 partition by tbname ")
        tdSql.checkRows(38)
        # tdSql.query("select mavg(st1,3) from stb1 partition by tbname")
        # tdSql.checkRows(38)
        tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
        tdSql.checkRows(38)
        tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
        tdSql.checkRows(38)
        tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
        tdSql.checkRows(38)

        # # bug need fix
        # tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ")
        # tdSql.checkRows(2)
        # tdSql.error("select mavg(st1+c1,3) from stb1 partition by tbname limit 1 ")

        # bug need fix
        tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
        tdSql.checkRows(38)

        # bug need fix
        # tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname")
        # tdSql.checkRows(38)
        # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname")
        # tdSql.checkRows(38)
        # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname slimit 1")
        # tdSql.checkRows(2)

        # partition by tags
        # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1")
        # tdSql.checkRows(38)
        # tdSql.query("select mavg(c1,3) from stb1 partition by st1")
        # tdSql.checkRows(38)
        # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(2)
        # tdSql.query("select mavg(c1,3) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(2)

        # partition by col
        # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by c1")
        # tdSql.checkRows(38)
        # tdSql.query("select mavg(c1 ,3) from stb1 partition by c1")
        # tdSql.checkRows(38)
        # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(2)
        # tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
        # tdSql.checkRows(2)

    def run(self):
        import traceback
        try:
            # run in develop branch
            self.mavg_test_run()
            self.mavg_support_stable()
            pass
        except Exception as e:
            traceback.print_exc()
@ -798,6 +798,36 @@ class TDTestCase:
        tdSql.query("select sample(c1,100)+2 from ct1")
        tdSql.query("select abs(sample(c1,100)) from ct1")

        # support stable and tbname
        tdSql.query("select tbname ,sample(c1,2) from stb1 partition by tbname order by tbname")
        tdSql.checkRows(4)
        tdSql.checkData(0,0,'ct1')
        tdSql.checkData(3,0,'ct4')

        # # bug need fix
        # tdSql.query(" select tbname ,c1 ,t1, sample(c1,2) from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(4)
        # tdSql.checkData(0,0,'ct1')
        # tdSql.checkData(3,0,'ct4')
        # tdSql.checkData(0,2,1)
        # tdSql.checkData(3,2,4)

        tdSql.query(" select tbname ,c1 ,t1, sample(c1,2) from stb1 partition by t1 order by t1 ")
        tdSql.checkRows(4)
        tdSql.checkData(0,0,'ct1')
        tdSql.checkData(3,0,'ct4')
        tdSql.checkData(0,2,1)
        tdSql.checkData(3,2,4)

        # bug need fix
        # tdSql.query(" select tbname ,c1 ,t1, sample(c1,2) from stb1 partition by c1 order by c1 ")
        # tdSql.checkRows(21)

        # bug need fix
        # tdSql.query(" select sample(c1,2) from stb1 partition by c1 ")
        # tdSql.checkRows(21)

    def sample_test_run(self):
        tdLog.printNoPrefix("==========support sample function==========")
        tbnum = 10
@ -18,6 +18,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.ts = 1420041600000  # 2015-01-01 00:00:00, begin time for the first record

    def prepare_datas(self):
        tdSql.execute(
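The start timestamp set in init reads as 2015-01-01 00:00:00 only when interpreted in UTC+8; a minimal sketch confirming the arithmetic, assuming that is the timezone the comment refers to:

from datetime import datetime, timezone, timedelta

ts_ms = 1420041600000                    # the value assigned to self.ts above
tz_cn = timezone(timedelta(hours=8))     # assumed local timezone for the comment
print(datetime.fromtimestamp(ts_ms / 1000, tz=tz_cn))  # 2015-01-01 00:00:00+08:00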
@ -344,6 +345,8 @@ class TDTestCase:
        tdSql.error("select stateduration(c1,'GT',1,1b) from ct1")
        tdSql.error("select stateduration(c1,'GT',1,1u) from ct1")
        tdSql.error("select stateduration(c1,'GT',1,1000s) from t1")
        tdSql.error("select stateduration(c1,'GT',1,10m) from t1")
        tdSql.error("select stateduration(c1,'GT',1,10d) from t1")
        tdSql.query("select stateduration(c1,'GT',1,1s) from t1")
        tdSql.checkData(10,0,63072035)
        tdSql.query("select stateduration(c1,'GT',1,1m) from t1")
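The unit checks in this and the next hunk all derive from the same 1s baseline; a minimal sketch of that arithmetic (illustrative only — the 1m/1h/1d assertions fall outside the visible hunks, so those values are inferred):

baseline_s = 63072035                            # value asserted for the 1s unit at row 10
per_minute = int(baseline_s / 60)                # expected result for a 1m-unit query
per_hour   = int(baseline_s / 60 / 60)           # expected result for a 1h-unit query
per_day    = int(baseline_s / 60 / 60 / 24)      # expected result for a 1d-unit query
per_week   = int(baseline_s / 60 / 60 / 24 / 7)  # equals int(63072035/60/7/24/60) used below
print(per_minute, per_hour, per_day, per_week)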
@ -355,6 +358,58 @@ class TDTestCase:
        tdSql.query("select stateduration(c1,'GT',1,1w) from t1")
        tdSql.checkData(10,0,int(63072035/60/7/24/60))

    def query_precision(self):
        def generate_data(precision="ms"):
            tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision))
            tdSql.execute("use db_%s;" %precision)
            tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
            tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision)
            tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision)

            if precision == "ms":
                start_ts = self.ts
                step = 10000
            elif precision == "us":
                start_ts = self.ts*1000
                step = 10000000
            elif precision == "ns":
                start_ts = self.ts*1000000
                step = 10000000000
            else:
                pass

            for i in range(10):
                sql1 = "insert into db_%s.tb1 values (%d,%d)"%(precision, start_ts+i*step, i)
                # presumably meant for tb2; otherwise tb2 stays empty
                sql2 = "insert into db_%s.tb2 values (%d,%d)"%(precision, start_ts+i*step, i)
                tdSql.execute(sql1)
                tdSql.execute(sql2)

        time_units = ["1s","1a","1u","1b"]

        precision_list = ["ms","us","ns"]
        for pres in precision_list:
            generate_data(pres)

            for index,unit in enumerate(time_units):
                if pres == "ms":
                    if unit in ["1u","1b"]:
                        # the test expects sub-millisecond units to be rejected in a ms-precision database
                        tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
                    else:
                        tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
                elif pres == "us" and unit == "1b":
                    # the test expects the nanosecond unit to be rejected in a us-precision database
                    tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
                else:
                    tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
                    basic_result = 70
                    tdSql.checkData(9,0,basic_result*pow(1000,index))

    def check_boundary_values(self):
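The expectation in query_precision, basic_result * pow(1000, index), follows from each entry of time_units being a factor of 1000 finer than the previous one (1s, then 1a = milliseconds, 1u = microseconds, 1b = nanoseconds); a minimal standalone sketch of that scaling, assuming the id > 1 state spans 7 intervals of 10 seconds:

time_units = ["1s", "1a", "1u", "1b"]   # seconds, milliseconds, microseconds, nanoseconds
basic_result = 70                       # assumed: 7 intervals x 10 s while id > 1
for index, unit in enumerate(time_units):
    print(unit, basic_result * pow(1000, index))
# 1s 70, 1a 70000, 1u 70000000, 1b 70000000000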
@ -420,6 +475,8 @@ class TDTestCase:
        tdLog.printNoPrefix("==========step6: statecount unit time test ============")

        self.check_unit_time()
        self.query_precision()


    def stop(self):
@ -337,7 +337,7 @@ class TDTestCase:
        tdSql.checkData(2,0,5)

        # nest query
        # tdSql.query("select tail(c1,2) from (select c1 from ct1)")
        # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)")
        tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, None)
@ -363,9 +363,58 @@ class TDTestCase:
        tdSql.error("select tail(c1,2) from ct1 group by tbname")

        # super table
        tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
        tdSql.query("select tail(c1,2) from stb1 partition by tbname")
        tdSql.checkRows(4)

        # bug need fix
        # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
        # tdSql.checkRows(4)

        # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
        # tdSql.checkRows(4)

        # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)

        tdSql.query("select tail(c1,2) from stb1 partition by tbname")
        tdSql.checkRows(4)

        # # bug need fix
        # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ")
        # tdSql.checkRows(3)
        # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
        # tdSql.checkRows(3)
        # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
        # tdSql.checkRows(3)
        tdSql.query(" select tail(t1,2) from stb1 ")
        tdSql.checkRows(2)
        tdSql.query(" select tail(t1+c1,2) from stb1 ")
        tdSql.checkRows(2)
        tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ")
        tdSql.checkRows(4)
        tdSql.query(" select tail(t1,2) from stb1 partition by tbname ")
        tdSql.checkRows(4)

        # nest query
        tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,None)
        tdSql.checkData(1,0,9)
        tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,4)
        tdSql.checkData(1,0,1)

    def check_boundary_values(self):
@ -387,8 +387,58 @@ class TDTestCase:
        # super table

        # super table
        tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
        tdSql.query("select tail(c1,2) from stb1 partition by tbname")
        tdSql.checkRows(4)

        # bug need fix
        # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
        # tdSql.checkRows(4)

        # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
        # tdSql.checkRows(4)

        # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
        # tdSql.checkRows(2)

        tdSql.query("select tail(c1,2) from stb1 partition by tbname")
        tdSql.checkRows(4)

        # # bug need fix
        # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ")
        # tdSql.checkRows(2)
        # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ")
        # tdSql.checkRows(3)
        # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ")
        # tdSql.checkRows(3)

        tdSql.query(" select unique(t1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.query(" select unique(t1+c1) from stb1 ")
        tdSql.checkRows(13)
        tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ")
        tdSql.checkRows(13)
        tdSql.query(" select unique(t1) from stb1 partition by tbname ")
        tdSql.checkRows(2)

        # nest query
        tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
        tdSql.checkRows(11)
        tdSql.checkData(0,0,6)
        tdSql.checkData(10,0,3)
        tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )")
        tdSql.checkRows(2)
        tdSql.checkData(0,0,4)
        tdSql.checkData(1,0,1)

    def check_boundary_values(self):
@ -96,7 +96,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery_str.py

python3 ./test.py -f 2-query/avg.py
#python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
python3 ./test.py -f 2-query/mavg.py
python3 ./test.py -f 2-query/diff.py
@ -118,6 +118,7 @@ python3 ./test.py -f 2-query/distribute_agg_avg.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/and_or_for_byte.py

python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py