Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/tsdb_optimize

This commit is contained in:
Hongze Cheng 2023-07-07 10:38:04 +08:00
commit 090ca39132
47 changed files with 1598 additions and 418 deletions

View File

@ -81,10 +81,6 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
void commitAsync();
void commitAsync(OffsetCommitCallback callback);
void commitSync() throws SQLException;
void close() throws SQLException;
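For context, a minimal polling loop built on the consumer interface above might look like the sketch below. It is illustrative only: the topic name, connection properties, and deserializer class are assumptions, and beyond the methods listed in this hunk it relies only on subscribe() and the unsubscribe() method noted in the 3.2.4 changelog.

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TaosConsumer;

public class TmqPollSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:6030");              // assumed endpoint
        props.setProperty("group.id", "demo_group");                           // assumed consumer group
        props.setProperty("enable.auto.commit", "false");                      // commit manually below
        props.setProperty("value.deserializer", "com.example.MyDeserializer"); // hypothetical deserializer class

        TaosConsumer<Map<String, Object>> consumer = new TaosConsumer<>(props);
        try {
            consumer.subscribe(Collections.singletonList("topic_demo"));       // hypothetical topic
            System.out.println("subscribed: " + consumer.subscription());
            for (int i = 0; i < 10; i++) {
                // poll a batch of records, process them, then commit the offsets
                ConsumerRecords<Map<String, Object>> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<Map<String, Object>> record : records) {
                    System.out.println(record.value());
                }
                consumer.commitSync(); // or commitAsync(callback) for a non-blocking commit
            }
            consumer.unsubscribe();
        } finally {
            consumer.close();
        }
    }
}
```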

View File

@ -36,15 +36,16 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.4 | Subscription adds the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | 3.0.5.0 or later |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
| 3.2.2 | Subscription adds seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
| 3.0.1 - 3.0.4 | Fix resultSet data being parsed incorrectly in some cases. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
| 2.0.41 | fix decode method of username and password in REST connection | - |
| 2.0.42 | Fix wasNull interface return value in WebSocket connection | - |
| 2.0.41 | Fix decode method of username and password in REST connection | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
| 2.0.38 | JDBC REST connections add bulk pull function | - |
| 2.0.37 | Support json tags | - |

View File

@ -55,4 +55,4 @@ def taos_get_assignment_and_seek_demo():
if __name__ == '__main__':
taosws_get_assignment_and_seek_demo()
taos_get_assignment_and_seek_demo()

View File

@ -81,10 +81,6 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
void commitAsync();
void commitAsync(OffsetCommitCallback callback);
void commitSync() throws SQLException;
void close() throws SQLException;

View File

@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.4 | Subscription adds the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection. | - |
| 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
| 3.2.2 | New feature: subscription supports the seek function. | 3.0.5.0 or later |
| 3.2.1 | New feature: WebSocket connection supports schemaless and prepareStatement writing. Change: consumer poll returns a ConsumerRecords result set; the data of each record can be obtained via value(). | 3.0.3.0 or later |

View File

@ -3025,6 +3025,7 @@ typedef struct {
char* sql;
char* ast;
int64_t deleteMark;
int64_t lastTs;
} SMCreateSmaReq;
int32_t tSerializeSMCreateSmaReq(void* buf, int32_t bufLen, SMCreateSmaReq* pReq);

View File

@ -319,6 +319,7 @@ typedef struct SIndexOptions {
SNode* pInterval;
SNode* pOffset;
SNode* pSliding;
int8_t tsPrecision;
SNode* pStreamOptions;
} SIndexOptions;
@ -332,6 +333,8 @@ typedef struct SCreateIndexStmt {
char tableName[TSDB_TABLE_NAME_LEN];
SNodeList* pCols;
SIndexOptions* pOptions;
SNode* pPrevQuery;
SMCreateSmaReq* pReq;
} SCreateIndexStmt;
typedef struct SDropIndexStmt {

View File

@ -459,10 +459,10 @@ typedef struct {
int64_t streamId;
int32_t taskId;
int32_t childId;
} SStreamRecoverFinishReq, SStreamTransferReq;
} SStreamScanHistoryFinishReq, SStreamTransferReq;
int32_t tEncodeStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq);
int32_t tDecodeStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq);
int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);
typedef struct {
int64_t streamId;
@ -537,8 +537,8 @@ int32_t tDecodeStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq)
int32_t tEncodeStreamTaskCheckRsp(SEncoder* pEncoder, const SStreamTaskCheckRsp* pRsp);
int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp);
int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
int32_t tDecodeSStreamTaskRecoverReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
int32_t tEncodeSStreamTaskScanHistoryReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
int32_t tDecodeSStreamTaskScanHistoryReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp);
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp);
@ -578,15 +578,17 @@ int32_t streamTaskCheckDownstreamTasks(SStreamTask* pTask);
int32_t streamTaskLaunchScanHistory(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask);
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp);
int32_t streamCheckHistoryTaskDownstrem(SStreamTask* pTask);
int32_t streamCheckHistoryTaskDownstream(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated);
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask);
bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask);
bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask);
int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask);
// common
int32_t streamSetParamForScanHistoryData(SStreamTask* pTask);
int32_t streamSetParamForScanHistory(SStreamTask* pTask);
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);
@ -596,32 +598,29 @@ int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* p
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
// int32_t streamSourceRecoverScanStep2(SStreamTask* pTask, int64_t ver);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
// agg level
int32_t streamAggRecoverPrepare(SStreamTask* pTask);
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t taskId, int32_t childId);
int32_t streamAggScanHistoryPrepare(SStreamTask* pTask);
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, int32_t taskId, int32_t childId);
// stream task meta
void streamMetaInit();
void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta); // todo remove it
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
// checkpoint

View File

@ -706,6 +706,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TIMELINE_QUERY TAOS_DEF_ERROR_CODE(0, 0x2666)
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner

View File

@ -835,6 +835,7 @@ int32_t tSerializeSMCreateSmaReq(void *buf, int32_t bufLen, SMCreateSmaReq *pReq
if (tEncodeBinary(&encoder, pReq->ast, pReq->astLen) < 0) return -1;
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -884,6 +885,7 @@ int32_t tDeserializeSMCreateSmaReq(void *buf, int32_t bufLen, SMCreateSmaReq *pR
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
}
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;

View File

@ -279,11 +279,11 @@ int32_t sndProcessTaskRecoverFinishReq(SSnode *pSnode, SRpcMsg *pMsg) {
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
// deserialize
SStreamRecoverFinishReq req;
SStreamScanHistoryFinishReq req;
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
tDecodeStreamRecoverFinishReq(&decoder, &req);
tDecodeStreamScanHistoryFinishReq(&decoder, &req);
tDecoderClear(&decoder);
// find task
@ -292,7 +292,7 @@ int32_t sndProcessTaskRecoverFinishReq(SSnode *pSnode, SRpcMsg *pMsg) {
return -1;
}
// do process request
if (streamProcessRecoverFinishReq(pTask, req.taskId, req.childId) < 0) {
if (streamProcessScanHistoryFinishReq(pTask, req.taskId, req.childId) < 0) {
streamMetaReleaseTask(pSnode->pMeta, pTask);
return -1;
}

View File

@ -80,15 +80,17 @@ typedef struct {
} STtlDelTtlCtx;
int ttlMgrOpen(STtlManger** ppTtlMgr, TDB* pEnv, int8_t rollback);
int ttlMgrClose(STtlManger* pTtlMgr);
int ttlMgrBegin(STtlManger* pTtlMgr, void* pMeta);
void ttlMgrClose(STtlManger* pTtlMgr);
int ttlMgrPostOpen(STtlManger* pTtlMgr, void* pMeta);
int ttlMgrConvert(TTB* pOldTtlIdx, TTB* pNewTtlIdx, void* pMeta);
int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn);
bool ttlMgrNeedUpgrade(TDB* pEnv);
int ttlMgrUpgrade(STtlManger* pTtlMgr, void* pMeta);
int ttlMgrInsertTtl(STtlManger* pTtlMgr, const STtlUpdTtlCtx* pUpdCtx);
int ttlMgrDeleteTtl(STtlManger* pTtlMgr, const STtlDelTtlCtx* pDelCtx);
int ttlMgrUpdateChangeTime(STtlManger* pTtlMgr, const STtlUpdCtimeCtx* pUpdCtimeCtx);
int ttlMgrFlush(STtlManger* pTtlMgr, TXN* pTxn);
int ttlMgrFindExpired(STtlManger* pTtlMgr, int64_t timePointMs, SArray* pTbUids);
#ifdef __cplusplus

View File

@ -136,6 +136,7 @@ typedef struct STbUidStore STbUidStore;
#define META_BEGIN_HEAP_NIL 2
int metaOpen(SVnode* pVnode, SMeta** ppMeta, int8_t rollback);
int metaUpgrade(SVnode* pVnode, SMeta** ppMeta);
int metaClose(SMeta** pMeta);
int metaBegin(SMeta* pMeta, int8_t fromSys);
TXN* metaGetTxn(SMeta* pMeta);
@ -248,7 +249,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskTransferStateReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessStreamTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverFinishRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqCheckLogInWal(STQ* pTq, int64_t version);

View File

@ -40,10 +40,6 @@ int metaBegin(SMeta *pMeta, int8_t heap) {
return -1;
}
if (ttlMgrBegin(pMeta->pTtlMgr, pMeta) < 0) {
return -1;
}
tdbCommit(pMeta->pEnv, pMeta->txn);
return 0;

View File

@ -29,6 +29,8 @@ static int ncolIdxCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen
static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); }
static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); }
static void metaCleanup(SMeta **ppMeta);
int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
SMeta *pMeta = NULL;
int ret;
@ -180,51 +182,43 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
return 0;
_err:
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
metaDestroyLock(pMeta);
taosMemoryFree(pMeta);
metaCleanup(&pMeta);
return -1;
}
int metaClose(SMeta **ppMeta) {
int metaUpgrade(SVnode *pVnode, SMeta **ppMeta) {
int code = TSDB_CODE_SUCCESS;
SMeta *pMeta = *ppMeta;
if (pMeta) {
if (pMeta->pEnv) metaAbort(pMeta);
if (pMeta->pCache) metaCacheClose(pMeta);
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
metaDestroyLock(pMeta);
taosMemoryFreeClear(*ppMeta);
if (ttlMgrNeedUpgrade(pMeta->pEnv)) {
code = metaBegin(pMeta, META_BEGIN_HEAP_OS);
if (code < 0) {
metaError("vgId:%d, failed to upgrade meta, meta begin failed since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
code = ttlMgrUpgrade(pMeta->pTtlMgr, pMeta);
if (code < 0) {
metaError("vgId:%d, failed to upgrade meta ttl since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
code = metaCommit(pMeta, pMeta->txn);
if (code < 0) {
metaError("vgId:%d, failed to upgrade meta ttl, meta commit failed since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
}
return TSDB_CODE_SUCCESS;
_err:
metaCleanup(ppMeta);
return code;
}
int metaClose(SMeta **ppMeta) {
metaCleanup(ppMeta);
return 0;
}
@ -270,6 +264,32 @@ int32_t metaULock(SMeta *pMeta) {
return ret;
}
static void metaCleanup(SMeta **ppMeta) {
SMeta *pMeta = *ppMeta;
if (pMeta) {
if (pMeta->pEnv) metaAbort(pMeta);
if (pMeta->pCache) metaCacheClose(pMeta);
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pNcolIdx) tdbTbClose(pMeta->pNcolIdx);
if (pMeta->pBtimeIdx) tdbTbClose(pMeta->pBtimeIdx);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
if (pMeta->pTtlMgr) ttlMgrClose(pMeta->pTtlMgr);
if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx);
if (pMeta->pTagIdx) tdbTbClose(pMeta->pTagIdx);
if (pMeta->pCtbIdx) tdbTbClose(pMeta->pCtbIdx);
if (pMeta->pSuidIdx) tdbTbClose(pMeta->pSuidIdx);
if (pMeta->pNameIdx) tdbTbClose(pMeta->pNameIdx);
if (pMeta->pUidIdx) tdbTbClose(pMeta->pUidIdx);
if (pMeta->pSkmDb) tdbTbClose(pMeta->pSkmDb);
if (pMeta->pTbDb) tdbTbClose(pMeta->pTbDb);
if (pMeta->pEnv) tdbClose(pMeta->pEnv);
metaDestroyLock(pMeta);
taosMemoryFreeClear(*ppMeta);
}
}
static int tbDbKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
STbDbKey *pTbDbKey1 = (STbDbKey *)pKey1;
STbDbKey *pTbDbKey2 = (STbDbKey *)pKey2;

View File

@ -21,6 +21,10 @@ typedef struct {
SMeta *pMeta;
} SConvertData;
static void ttlMgrCleanup(STtlManger *pTtlMgr);
static int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta);
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid);
static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
static int ttlIdxKeyV1Cmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2);
@ -36,27 +40,17 @@ const char *ttlTbname = "ttl.idx";
const char *ttlV1Tbname = "ttlv1.idx";
int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
int ret;
int ret = TSDB_CODE_SUCCESS;
int64_t startNs = taosGetTimestampNs();
*ppTtlMgr = NULL;
STtlManger *pTtlMgr = (STtlManger *)tdbOsCalloc(1, sizeof(*pTtlMgr));
if (pTtlMgr == NULL) {
return -1;
}
if (tdbTbExist(ttlTbname, pEnv)) {
ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, pEnv, &pTtlMgr->pOldTtlIdx, rollback);
if (ret < 0) {
metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno));
return ret;
}
}
if (pTtlMgr == NULL) return TSDB_CODE_OUT_OF_MEMORY;
ret = tdbTbOpen(ttlV1Tbname, TDB_VARIANT_LEN, TDB_VARIANT_LEN, ttlIdxKeyV1Cmpr, pEnv, &pTtlMgr->pTtlIdx, rollback);
if (ret < 0) {
metaError("failed to open %s since %s", ttlV1Tbname, tstrerror(terrno));
tdbOsFree(pTtlMgr);
return ret;
}
@ -66,28 +60,47 @@ int ttlMgrOpen(STtlManger **ppTtlMgr, TDB *pEnv, int8_t rollback) {
taosThreadRwlockInit(&pTtlMgr->lock, NULL);
ret = ttlMgrFillCache(pTtlMgr);
if (ret < 0) {
metaError("failed to fill hash since %s", tstrerror(terrno));
ttlMgrCleanup(pTtlMgr);
return ret;
}
int64_t endNs = taosGetTimestampNs();
metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
endNs - startNs);
*ppTtlMgr = pTtlMgr;
return 0;
return TSDB_CODE_SUCCESS;
}
int ttlMgrClose(STtlManger *pTtlMgr) {
taosHashCleanup(pTtlMgr->pTtlCache);
taosHashCleanup(pTtlMgr->pDirtyUids);
tdbTbClose(pTtlMgr->pTtlIdx);
taosThreadRwlockDestroy(&pTtlMgr->lock);
tdbOsFree(pTtlMgr);
return 0;
void ttlMgrClose(STtlManger *pTtlMgr) { ttlMgrCleanup(pTtlMgr); }
bool ttlMgrNeedUpgrade(TDB *pEnv) {
bool needUpgrade = tdbTbExist(ttlTbname, pEnv);
if (needUpgrade) {
metaInfo("find ttl idx in old version , will convert");
}
return needUpgrade;
}
int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
metaInfo("ttl mgr start open");
int ret;
int ttlMgrUpgrade(STtlManger *pTtlMgr, void *pMeta) {
SMeta *meta = (SMeta *)pMeta;
int ret = TSDB_CODE_SUCCESS;
if (!tdbTbExist(ttlTbname, meta->pEnv)) return TSDB_CODE_SUCCESS;
metaInfo("ttl mgr start upgrade");
int64_t startNs = taosGetTimestampNs();
SMeta *meta = (SMeta *)pMeta;
ret = tdbTbOpen(ttlTbname, sizeof(STtlIdxKey), 0, ttlIdxKeyCmpr, meta->pEnv, &pTtlMgr->pOldTtlIdx, 0);
if (ret < 0) {
metaError("failed to open %s index since %s", ttlTbname, tstrerror(terrno));
goto _out;
}
if (pTtlMgr->pOldTtlIdx) {
ret = ttlMgrConvert(pTtlMgr->pOldTtlIdx, pTtlMgr->pTtlIdx, pMeta);
if (ret < 0) {
metaError("failed to convert ttl index since %s", tstrerror(terrno));
@ -100,10 +113,6 @@ int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
goto _out;
}
tdbTbClose(pTtlMgr->pOldTtlIdx);
pTtlMgr->pOldTtlIdx = NULL;
}
ret = ttlMgrFillCache(pTtlMgr);
if (ret < 0) {
metaError("failed to fill hash since %s", tstrerror(terrno));
@ -111,13 +120,23 @@ int ttlMgrBegin(STtlManger *pTtlMgr, void *pMeta) {
}
int64_t endNs = taosGetTimestampNs();
metaInfo("ttl mgr open end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
metaInfo("ttl mgr upgrade end, hash size: %d, time consumed: %" PRId64 " ns", taosHashGetSize(pTtlMgr->pTtlCache),
endNs - startNs);
_out:
tdbTbClose(pTtlMgr->pOldTtlIdx);
pTtlMgr->pOldTtlIdx = NULL;
return ret;
}
static void ttlMgrCleanup(STtlManger *pTtlMgr) {
taosHashCleanup(pTtlMgr->pTtlCache);
taosHashCleanup(pTtlMgr->pDirtyUids);
tdbTbClose(pTtlMgr->pTtlIdx);
taosThreadRwlockDestroy(&pTtlMgr->lock);
tdbOsFree(pTtlMgr);
}
static void ttlMgrBuildKey(STtlIdxKeyV1 *pTtlKey, int64_t ttlDays, int64_t changeTimeMs, tb_uid_t uid) {
if (ttlDays <= 0) return;
@ -205,7 +224,7 @@ _out:
return ret;
}
int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) {
static int ttlMgrConvert(TTB *pOldTtlIdx, TTB *pNewTtlIdx, void *pMeta) {
SMeta *meta = pMeta;
metaInfo("ttlMgr convert ttl start.");

View File

@ -1071,13 +1071,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
return -1;
}
// check param
int64_t fillVer1 = pTask->chkInfo.version;
if (fillVer1 <= 0) {
streamMetaReleaseTask(pMeta, pTask);
return -1;
}
// do recovery step 1
const char* pId = pTask->id.idStr;
tqDebug("s-task:%s start history data scan stage(step 1), status:%s", pId,
@ -1091,7 +1084,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
return 0;
}
if (!pReq->igUntreated && !streamTaskRecoverScanStep1Finished(pTask)) {
if (!streamTaskRecoverScanStep1Finished(pTask)) {
streamSourceScanHistoryData(pTask);
}
@ -1121,39 +1114,23 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
// wait for the stream task get ready for scan history data
while (((pStreamTask->status.downstreamReady == 0) && (pStreamTask->status.taskStatus != TASK_STATUS__STOP)) ||
pStreamTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
tqDebug(
"s-task:%s level:%d related stream task:%s not ready for halt, wait for it continue and recheck in 100ms",
pTask->id.idStr, pTask->info.taskLevel, pStreamTask->id.idStr);
tqDebug("s-task:%s level:%d related stream task:%s not ready for halt, wait for it and recheck in 100ms", pId,
pTask->info.taskLevel, pId);
taosMsleep(100);
}
// now we can stop the stream task execution
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
tqDebug("s-task:%s level:%d status is set to halt by history scan task:%s", pStreamTask->id.idStr,
tqDebug("s-task:%s level:%d status is set to halt by history scan task:%s", pId,
pStreamTask->info.taskLevel, pId);
// if it's an source task, extract the last version in wal.
pRange = &pTask->dataRange.range;
int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
ASSERT(latestVer >= pRange->maxVer);
int64_t nextStartVer = pRange->maxVer + 1;
if (nextStartVer > latestVer - 1) {
// no input data yet. no need to execute the secondardy scan while stream task halt
streamTaskRecoverSetAllStepFinished(pTask);
tqDebug("s-task:%s no need to perform secondary scan-history-data(step 2), since no data ingest during secondary scan", pId);
} else {
// 2. do secondary scan of the history data, the time window remain, and the version range is updated to
// [pTask->dataRange.range.maxVer, ver1]
pRange->minVer = nextStartVer;
pRange->maxVer = latestVer - 1;
}
streamHistoryTaskSetVerRangeStep2(pTask);
}
if (!streamTaskRecoverScanStep1Finished(pTask)) {
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64
" do secondary scan-history-data after halt the related stream task:%s",
pId, pTask->info.taskLevel, pRange->minVer, pRange->maxVer, pStreamTask->id.idStr);
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " do secondary scan-history-data after halt the related stream task:%s",
pId, pTask->info.taskLevel, pRange->minVer, pRange->maxVer, pId);
ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__WAITING);
st = taosGetTimestampMs();
@ -1162,6 +1139,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
if (!streamTaskRecoverScanStep2Finished(pTask)) {
streamSourceScanHistoryData(pTask);
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING || streamTaskShouldPause(&pTask->status)) {
tqDebug("s-task:%s is dropped or paused, abort recover in step1", pId);
streamMetaReleaseTask(pMeta, pTask);
@ -1174,7 +1152,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
el = (taosGetTimestampMs() - st) / 1000.0;
tqDebug("s-task:%s history data scan stage(step 2) ended, elapsed time:%.2fs", pId, el);
// 3. notify the downstream tasks to transfer executor state after handle all history blocks.
// 3. notify downstream tasks to transfer executor state after handling all history blocks.
if (!pTask->status.transferState) {
code = streamDispatchTransferStateMsg(pTask);
if (code != TSDB_CODE_SUCCESS) {
@ -1210,14 +1188,16 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
if (pTask->historyTaskId.taskId == 0) {
*pWindow = (STimeWindow){INT64_MIN, INT64_MAX};
tqDebug("s-task:%s no associated task, reset the time window:%" PRId64 " - %" PRId64, pId, pWindow->skey,
pWindow->ekey);
tqDebug("s-task:%s no related scan-history-data task, reset the time window:%" PRId64 " - %" PRId64, pId,
pWindow->skey, pWindow->ekey);
} else {
tqDebug("s-task:%s history data scan completed, now start to scan data from wal, start ver:%" PRId64
", window:%" PRId64 " - %" PRId64,
tqDebug(
"s-task:%s history data in current time window scan completed, now start to handle data from WAL, start "
"ver:%" PRId64 ", window:%" PRId64 " - %" PRId64,
pId, pTask->chkInfo.currentVer, pWindow->skey, pWindow->ekey);
}
// notify the downstream agg tasks that upstream tasks are ready to process the WAL data, update the
code = streamTaskScanHistoryDataComplete(pTask);
streamMetaReleaseTask(pMeta, pTask);
@ -1238,7 +1218,7 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, int64_t sversion, char* msg, int
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
int32_t code = tDecodeStreamRecoverFinishReq(&decoder, &req);
int32_t code = tDecodeStreamScanHistoryFinishReq(&decoder, &req);
tDecoderClear(&decoder);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
@ -1251,61 +1231,17 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, int64_t sversion, char* msg, int
streamTaskReleaseState(pTask);
tqDebug("s-task:%s receive state transfer req", pTask->id.idStr);
// related stream task load the state from the state storage backend
SStreamTask* pStreamTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->streamTaskId.taskId);
if (pStreamTask == NULL) {
tqError("failed to find related stream task:0x%x, it may have been dropped already", req.taskId);
return -1;
}
streamTaskReloadState(pStreamTask);
ASSERT(pTask->streamTaskId.taskId != 0);
pTask->status.transferState = true; // persistent data?
#if 0
// do check if current task handle all data in the input queue
int64_t st = taosGetTimestampMs();
tqDebug("s-task:%s start step2 recover, ts:%" PRId64, pTask->id.idStr, st);
code = streamSourceRecoverScanStep2(pTask, sversion);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
qDebug("s-task:%s set start wal scan start ver:%"PRId64, pTask->id.idStr, sversion);
walReaderSeekVer(pTask->exec.pWalReader, sversion);
pTask->chkInfo.currentVer = sversion;
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}
// restore param
code = streamRestoreParam(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
// set status normal
tqDebug("s-task:%s blocking stage completed, set the status to be normal", pTask->id.idStr);
code = streamSetStatusNormal(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
double el = (taosGetTimestampMs() - st) / 1000.0;
tqDebug("s-task:%s step2 recover finished, el:%.2fs", pTask->id.idStr, el);
// dispatch recover finish req to all related downstream task
code = streamDispatchScanHistoryFinishMsg(pTask);
if (code < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return -1;
}
atomic_store_8(&pTask->info.fillHistory, 0);
streamMetaSaveTask(pTq->pStreamMeta, pTask);
#endif
pTask->status.transferState = true;
streamSchedExec(pTask);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
@ -1313,31 +1249,28 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, int64_t sversion, char* msg, int
return 0;
}
int32_t tqProcessTaskRecoverFinishReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessStreamTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) {
char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
// deserialize
SStreamRecoverFinishReq req;
SStreamScanHistoryFinishReq req = {0};
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
tDecodeStreamRecoverFinishReq(&decoder, &req);
tDecodeStreamScanHistoryFinishReq(&decoder, &req);
tDecoderClear(&decoder);
// find task
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
if (pTask == NULL) {
return -1;
}
// do process request
if (streamProcessRecoverFinishReq(pTask, req.taskId, req.childId) < 0) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
tqError("failed to find task:0x%x, it may be destroyed, vgId:%d", req.taskId, pTq->pStreamMeta->vgId);
return -1;
}
int32_t code = streamProcessScanHistoryFinishReq(pTask, req.taskId, req.childId);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
return code;
}
int32_t tqProcessTaskRecoverFinishRsp(STQ* pTq, SRpcMsg* pMsg) {
@ -1423,10 +1356,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
pTask->chkInfo.version);
streamProcessRunReq(pTask);
} else {
// if (streamTaskShouldPause(&pTask->status)) {
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
// }
tqDebug("vgId:%d s-task:%s ignore run req since not in ready state, status:%s, sched-status:%d", vgId,
pTask->id.idStr, streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus);
}
@ -1517,7 +1447,10 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, int8_t igUntreated) {
int32_t vgId = pTq->pStreamMeta->vgId;
if (pTask) {
if (pTask == NULL) {
return -1;
}
if (streamTaskShouldPause(&pTask->status)) {
atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus);
@ -1541,10 +1474,8 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
streamSchedExec(pTask);
}
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
} else {
return -1;
}
return 0;
}
@ -1560,6 +1491,7 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
if (pHistoryTask) {
code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
}
return code;
}

View File

@ -372,6 +372,10 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
goto _err;
}
if (metaUpgrade(pVnode, &pVnode->pMeta) < 0) {
vError("vgId:%d, failed to upgrade meta since %s", TD_VID(pVnode), tstrerror(terrno));
}
// open tsdb
if (!VND_IS_RSMA(pVnode) && tsdbOpen(pVnode, &VND_TSDB(pVnode), VNODE_TSDB_DIR, NULL, rollback) < 0) {
vError("vgId:%d, failed to open vnode tsdb since %s", TD_VID(pVnode), tstrerror(terrno));

View File

@ -667,7 +667,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
return tqProcessTaskTransferStateReq(pVnode->pTq, 0, pReq, len);
}
case TDMT_STREAM_SCAN_HISTORY_FINISH:
return tqProcessTaskRecoverFinishReq(pVnode->pTq, pMsg);
return tqProcessStreamTaskScanHistoryFinishReq(pVnode->pTq, pMsg);
case TDMT_STREAM_SCAN_HISTORY_FINISH_RSP:
return tqProcessTaskRecoverFinishRsp(pVnode->pTq, pMsg);
default:

View File

@ -911,8 +911,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
pStreamInfo->recoverStep1Finished = true;
pStreamInfo->recoverStep2Finished = false;
qDebug("%s step 2. set param for stream scanner for scan history data, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64
" - %" PRId64,
qDebug("%s step 2. set param for stream scanner for scan history data, verRange:%" PRId64 " - %" PRId64
", window:%" PRId64 " - %" PRId64,
GET_TASKID(pTaskInfo), pStreamInfo->fillHistoryVer.minVer, pStreamInfo->fillHistoryVer.maxVer, pWindow->skey,
pWindow->ekey);
return 0;
@ -999,31 +999,35 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
const char* id = GET_TASKID(pTaskInfo);
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
uint16_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
qInfo("restore stream param for interval: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
qInfo("%s restore stream agg executors param for interval: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
pInfo->twAggSup.deleteMark);
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
qInfo("restore stream param for session: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
qInfo("%s restore stream agg executor param for session: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
pInfo->twAggSup.deleteMark);
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
qInfo("restore stream param for state: %d, %" PRId64, pInfo->twAggSup.calTrigger, pInfo->twAggSup.deleteMark);
qInfo("%s restore stream agg executor param for state: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
pInfo->twAggSup.deleteMark);
}
// iterate operator tree
@ -1037,7 +1041,6 @@ int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo) {
pOperator = pOperator->pDownstream[0];
}
}
return 0;
}
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo) {

View File

@ -907,6 +907,10 @@ void nodesDestroyNode(SNode* pNode) {
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pNode;
nodesDestroyNode((SNode*)pStmt->pOptions);
nodesDestroyList(pStmt->pCols);
if (pStmt->pReq) {
tFreeSMCreateSmaReq(pStmt->pReq);
taosMemoryFreeClear(pStmt->pReq);
}
break;
}
case QUERY_NODE_DROP_INDEX_STMT: // no pointer field
@ -1053,6 +1057,7 @@ void nodesDestroyNode(SNode* pNode) {
}
case QUERY_NODE_QUERY: {
SQuery* pQuery = (SQuery*)pNode;
nodesDestroyNode(pQuery->pPrevRoot);
nodesDestroyNode(pQuery->pRoot);
nodesDestroyNode(pQuery->pPostRoot);
taosMemoryFreeClear(pQuery->pResSchema);

View File

@ -35,6 +35,7 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMe
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
#ifdef __cplusplus
}

View File

@ -3520,6 +3520,10 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow) {
return TSDB_CODE_SUCCESS;
}
if (pSelect->pFromTable->type == QUERY_NODE_REAL_TABLE &&
((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType == TSDB_SYSTEM_TABLE) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "WINDOW");
}
pCxt->currClause = SQL_CLAUSE_WINDOW;
int32_t code = translateExpr(pCxt, &pSelect->pWindow);
if (TSDB_CODE_SUCCESS == code) {
@ -5803,6 +5807,15 @@ static int32_t buildCreateSmaReq(STranslateContext* pCxt, SCreateIndexStmt* pStm
if (TSDB_CODE_SUCCESS == code) {
code = getSmaIndexAst(pCxt, pStmt, &pReq->ast, &pReq->astLen, &pReq->expr, &pReq->exprLen);
}
if (TSDB_CODE_SUCCESS == code) {
STableMeta* pMetaCache = NULL;
code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
pStmt->pOptions->tsPrecision = pMetaCache->tableInfo.precision;
code = createLastTsSelectStmt(pStmt->dbName, pStmt->tableName, pMetaCache, &pStmt->pPrevQuery);
}
taosMemoryFreeClear(pMetaCache);
}
return code;
}
@ -5828,15 +5841,60 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS
}
static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) {
SMCreateSmaReq createSmaReq = {0};
int32_t code = checkCreateSmaIndex(pCxt, pStmt);
pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq));
if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY;
if (TSDB_CODE_SUCCESS == code) {
code = buildCreateSmaReq(pCxt, pStmt, &createSmaReq);
code = buildCreateSmaReq(pCxt, pStmt, pStmt->pReq);
}
TSWAP(pCxt->pPrevRoot, pStmt->pPrevQuery);
return code;
}
int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval) {
pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i;
pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit;
pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0;
pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval;
pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit;
pInterval->precision = pStmt->pOptions->tsPrecision;
return TSDB_CODE_SUCCESS;
}
int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) {
int32_t code = TSDB_CODE_SUCCESS;
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
int64_t lastTs = 0;
SInterval interval = {0};
STranslateContext pCxt = {0};
code = initTranslateContext(pParseCxt, NULL, &pCxt);
if (TSDB_CODE_SUCCESS == code) {
code = createIntervalFromCreateSmaIndexStmt(pStmt, &interval);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCmdMsg(pCxt, TDMT_MND_CREATE_SMA, (FSerializeFunc)tSerializeSMCreateSmaReq, &createSmaReq);
if (pResRow && pResRow[0]) {
lastTs = *(int64_t*)pResRow[0];
} else if (interval.interval > 0) {
lastTs = convertTimePrecision(taosGetTimestampMs(), TSDB_TIME_PRECISION_MILLI, interval.precision);
} else {
lastTs = taosGetTimestampMs();
}
tFreeSMCreateSmaReq(&createSmaReq);
}
if (TSDB_CODE_SUCCESS == code) {
if (interval.interval > 0) {
pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval);
} else {
pStmt->pReq->lastTs = lastTs;
}
code = buildCmdMsg(&pCxt, TDMT_MND_CREATE_SMA, (FSerializeFunc)tSerializeSMCreateSmaReq, pStmt->pReq);
}
if (TSDB_CODE_SUCCESS == code) {
code = setQuery(&pCxt, pQuery);
}
setRefreshMate(&pCxt, pQuery);
destroyTranslateContext(&pCxt);
tFreeSMCreateSmaReq(pStmt->pReq);
taosMemoryFreeClear(pStmt->pReq);
return code;
}
@ -6989,7 +7047,7 @@ static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt*
return code;
}
int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
int32_t code = TSDB_CODE_SUCCESS;
if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) {
return code;

View File

@ -172,6 +172,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "%s function is not supported in group query";
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC:
return "%s function is not supported in system table query";
case TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED:
return "%s is not supported in system table query";
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:

View File

@ -227,6 +227,8 @@ int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pRes
case QUERY_NODE_CREATE_STREAM_STMT:
code = translatePostCreateStream(pCxt, pQuery, pResRow);
break;
case QUERY_NODE_CREATE_INDEX_STMT:
code = translatePostCreateSmaIndex(pCxt, pQuery, pResRow);
break;
default:
break;
}

View File

@ -542,6 +542,18 @@ TEST_F(ParserInitialCTest, createSmaIndex) {
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_INDEX_STMT);
SMCreateSmaReq req = {0};
ASSERT_TRUE(pQuery->pPrevRoot);
ASSERT_EQ(QUERY_NODE_SELECT_STMT, nodeType(pQuery->pPrevRoot));
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
SCmdMsgInfo* pCmdMsg = (SCmdMsgInfo*)taosMemoryMalloc(sizeof(SCmdMsgInfo));
if (NULL == pCmdMsg) FAIL();
pCmdMsg->msgType = TDMT_MND_CREATE_SMA;
pCmdMsg->msgLen = tSerializeSMCreateSmaReq(NULL, 0, pStmt->pReq);
pCmdMsg->pMsg = taosMemoryMalloc(pCmdMsg->msgLen);
if (!pCmdMsg->pMsg) FAIL();
tSerializeSMCreateSmaReq(pCmdMsg->pMsg, pCmdMsg->msgLen, pStmt->pReq);
((SQuery*)pQuery)->pCmdMsg = pCmdMsg;
ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
ASSERT_EQ(std::string(req.name), std::string(expect.name));

View File

@ -291,4 +291,13 @@ TEST_F(ParserInitialDTest, dropUser) {
run("DROP USER wxy");
}
TEST_F(ParserInitialDTest, IntervalOnSysTable) {
login("root");
run("SELECT count('reboot_time') FROM information_schema.ins_dnodes interval(14m) sliding(9m)",
TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, PARSER_STAGE_TRANSLATE);
run("SELECT count('create_time') FROM information_schema.ins_qnodes interval(14m) sliding(9m)",
TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, PARSER_STAGE_TRANSLATE);
}
} // namespace ParserTest

View File

@ -441,6 +441,16 @@ class PlannerTestBaseImpl {
pCxt->topicQuery = true;
} else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
SMCreateSmaReq req = {0};
SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot;
SCmdMsgInfo* pCmdMsg = (SCmdMsgInfo*)taosMemoryMalloc(sizeof(SCmdMsgInfo));
if (NULL == pCmdMsg) FAIL();
pCmdMsg->msgType = TDMT_MND_CREATE_SMA;
pCmdMsg->msgLen = tSerializeSMCreateSmaReq(NULL, 0, pStmt->pReq);
pCmdMsg->pMsg = taosMemoryMalloc(pCmdMsg->msgLen);
if (!pCmdMsg->pMsg) FAIL();
tSerializeSMCreateSmaReq(pCmdMsg->pMsg, pCmdMsg->msgLen, pStmt->pReq);
((SQuery*)pQuery)->pCmdMsg = pCmdMsg;
tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
g_mockCatalogService->createSmaIndex(&req);
nodesStringToNode(req.ast, &pCxt->pAstRoot);

View File

@ -49,7 +49,7 @@ int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* p
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData);
int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId,
int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHistoryFinishReq* pReq, int32_t vgId,
SEpSet* pEpSet);
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInc.h"
#include "streamInt.h"
#include "ttimer.h"
#define STREAM_TASK_INPUT_QUEUE_CAPACITY 20480

View File

@ -16,7 +16,7 @@
#include "streamBackendRocksdb.h"
#include "executor.h"
#include "query.h"
#include "streamInc.h"
#include "streamInt.h"
#include "tcommon.h"
#include "tref.h"

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInc.h"
#include "streamInt.h"
SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) {
SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen);

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInt.h"
#include "ttimer.h"
#include "streamInc.h"
#define MAX_BLOCK_NAME_NUM 1024
#define DISPATCH_RETRY_INTERVAL_MS 300
@ -276,14 +276,14 @@ int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pR
return 0;
}
int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId,
int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHistoryFinishReq* pReq, int32_t vgId,
SEpSet* pEpSet) {
void* buf = NULL;
int32_t code = -1;
SRpcMsg msg = {0};
int32_t tlen;
tEncodeSize(tEncodeStreamRecoverFinishReq, pReq, tlen, code);
tEncodeSize(tEncodeStreamScanHistoryFinishReq, pReq, tlen, code);
if (code < 0) {
return -1;
}
@ -299,7 +299,7 @@ int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamRe
SEncoder encoder;
tEncoderInit(&encoder, abuf, tlen);
if ((code = tEncodeStreamRecoverFinishReq(&encoder, pReq)) < 0) {
if ((code = tEncodeStreamScanHistoryFinishReq(&encoder, pReq)) < 0) {
if (buf) {
rpcFreeCont(buf);
}

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInc.h"
#include "streamInt.h"
// maximum allowed processed block batches. One block may include several submit blocks
#define MAX_STREAM_EXEC_BATCH_NUM 32
@ -351,14 +351,19 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) {
static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
SStreamTask* pStreamTask = streamMetaAcquireTask(pTask->pMeta, pTask->streamTaskId.taskId);
if (pStreamTask == NULL) {
qError("s-task:%s failed to find related stream task:0x%x, it may have been destoryed or closed",
pTask->id.idStr, pTask->streamTaskId.taskId);
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
} else {
qDebug("s-task:%s scan history task end, update stream task:%s info, transfer exec state", pTask->id.idStr, pStreamTask->id.idStr);
// todo handle stream task is dropped here
}
ASSERT(pStreamTask != NULL && pStreamTask->historyTaskId.taskId == pTask->id.taskId);
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
// here we need to wait for the stream task to handle all data in the input queue.
// It must be halted for a source stream task, since the related scan-history-data task starts scanning the history
// data for step 2. For an agg task, wait until its inputQ and outputQ are fully consumed before transferring state.
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
ASSERT(pStreamTask->status.taskStatus == TASK_STATUS__HALT);
} else {
@ -369,21 +374,18 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
// wait for the stream task to be idle
waitForTaskIdle(pTask, pStreamTask);
// In case of sink tasks, no need to be halted for them.
// In case of source tasks and agg tasks, we should HALT them, and wait for them to be idle. And then, it's safe to
// start the task state transfer procedure.
// When a task is idle with halt status, all data in inputQ are consumed.
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
// update the scan data range for source task.
qDebug("s-task:%s level:%d stream task window %" PRId64 " - %" PRId64 " update to %" PRId64 " - %" PRId64
", status:%s, sched-status:%d",
pStreamTask->id.idStr, TASK_LEVEL__SOURCE, pTimeWindow->skey, pTimeWindow->ekey, INT64_MIN,
pTimeWindow->ekey, streamGetTaskStatusStr(TASK_STATUS__NORMAL), pStreamTask->status.schedStatus);
// todo transfer state
} else {
// for sink tasks, they are continue to execute, no need to be halt.
// the process should be stopped for a while, during the term of transfer task state.
// OR wait for the inputQ && outputQ of agg tasks are all consumed, and then start the state transfer
qDebug("s-task:%s no need to update time window, for non-source task", pStreamTask->id.idStr);
// todo transfer state
qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr);
}
// expand the query time window for stream scanner
@ -401,6 +403,61 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
return TSDB_CODE_SUCCESS;
}
static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks,
const char* id) {
int32_t retryTimes = 0;
int32_t MAX_RETRY_TIMES = 5;
while (1) {
if (streamTaskShouldPause(&pTask->status)) {
qDebug("s-task:%s task should pause, input blocks:%d", pTask->id.idStr, *numOfBlocks);
return TSDB_CODE_SUCCESS;
}
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
if (qItem == NULL) {
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) {
taosMsleep(10);
qDebug("===stream===try again batchSize:%d, retry:%d", *numOfBlocks, retryTimes);
continue;
}
qDebug("===stream===break batchSize:%d", *numOfBlocks);
return TSDB_CODE_SUCCESS;
}
// do not merge blocks for sink node
if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
*numOfBlocks = 1;
*pInput = qItem;
return TSDB_CODE_SUCCESS;
}
if (*pInput == NULL) {
ASSERT((*numOfBlocks) == 0);
*pInput = qItem;
} else {
// todo we need to sort the data block, instead of just appending into the array list.
void* newRet = streamMergeQueueItem(*pInput, qItem);
if (newRet == NULL) {
qError("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d", id, *numOfBlocks);
streamQueueProcessFail(pTask->inputQueue);
return TSDB_CODE_SUCCESS;
}
*pInput = newRet;
}
*numOfBlocks += 1;
streamQueueProcessSuccess(pTask->inputQueue);
if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) {
qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM);
return TSDB_CODE_SUCCESS;
}
}
}
/**
* todo: the batch of blocks should be tuned dynamically, according to the total elapsed time of each batch of blocks; the
* appropriate batch of blocks should be handled in 5 to 10 sec.
@ -409,75 +466,20 @@ int32_t streamExecForAll(SStreamTask* pTask) {
const char* id = pTask->id.idStr;
while (1) {
int32_t batchSize = 1;
int16_t times = 0;
int32_t batchSize = 0;
SStreamQueueItem* pInput = NULL;
// merge multiple input data if possible in the input queue.
qDebug("s-task:%s start to extract data block from inputQ, status:%s", id, streamGetTaskStatusStr(pTask->status.taskStatus));
while (1) {
// downstream task's input queue is blocked, stop immediately
if (streamTaskShouldPause(&pTask->status) || (pTask->outputStatus == TASK_OUTPUT_STATUS__BLOCKED) ||
streamTaskShouldStop(&pTask->status)) {
if (batchSize > 1) {
break;
} else {
qDebug("123 %s", pTask->id.idStr);
return 0;
}
}
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
if (qItem == NULL) {
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && batchSize < MIN_STREAM_EXEC_BATCH_NUM && times < 5) {
times++;
taosMsleep(10);
qDebug("===stream===try again batchSize:%d", batchSize);
continue;
}
qDebug("===stream===break batchSize:%d", batchSize);
break;
}
if (pInput == NULL) {
pInput = qItem;
streamQueueProcessSuccess(pTask->inputQueue);
if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
break;
}
} else {
// todo we need to sort the data block, instead of just appending into the array list.
void* newRet = NULL;
if ((newRet = streamMergeQueueItem(pInput, qItem)) == NULL) {
streamQueueProcessFail(pTask->inputQueue);
break;
} else {
batchSize++;
pInput = newRet;
streamQueueProcessSuccess(pTask->inputQueue);
if (batchSize > MAX_STREAM_EXEC_BATCH_NUM) {
qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id,
MAX_STREAM_EXEC_BATCH_NUM);
break;
}
}
}
}
if (streamTaskShouldStop(&pTask->status)) {
if (pInput) {
streamFreeQitem(pInput);
}
return 0;
}
qDebug("s-task:%s start to extract data block from inputQ", id);
/*int32_t code = */extractMsgFromInputQ(pTask, &pInput, &batchSize, id);
if (pInput == NULL) {
ASSERT(batchSize == 0);
if (pTask->info.fillHistory && pTask->status.transferState) {
int32_t code = streamTransferStateToStreamTask(pTask);
if (code != TSDB_CODE_SUCCESS) { // todo handle this
return 0;
}
}
break;
@ -534,8 +536,8 @@ int32_t streamExecForAll(SStreamTask* pTask) {
streamTaskExecImpl(pTask, pInput, &resSize, &totalBlocks);
double el = (taosGetTimestampMs() - st) / 1000.0;
qDebug("s-task:%s batch of (%d)input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d",
id, batchSize, el, resSize / 1048576.0, totalBlocks);
qDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d",
id, el, resSize / 1048576.0, totalBlocks);
streamFreeQitem(pInput);
}
@ -569,7 +571,7 @@ int32_t streamTryExec(SStreamTask* pTask) {
if (schedStatus == TASK_SCHED_STATUS__WAITING) {
int32_t code = streamExecForAll(pTask);
if (code < 0) {
if (code < 0) { // todo this status should be removed
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
return -1;
}

View File

@ -15,7 +15,7 @@
#include "executor.h"
#include "streamBackendRocksdb.h"
#include "streamInc.h"
#include "streamInt.h"
#include "tref.h"
#include "ttimer.h"

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInc.h"
#include "streamInt.h"
SStreamQueue* streamQueueOpen(int64_t cap) {
SStreamQueue* pQueue = taosMemoryCalloc(1, sizeof(SStreamQueue));

View File

@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "streamInc.h"
#include "streamInt.h"
#include "ttimer.h"
#include "wal.h"
@ -53,7 +53,7 @@ static int32_t doLaunchScanHistoryTask(SStreamTask* pTask) {
qDebug("s-task:%s vgId:%d status:%s, start scan-history-data task, verRange:%" PRId64 " - %" PRId64, pTask->id.idStr,
pTask->info.nodeId, streamGetTaskStatusStr(pTask->status.taskStatus), pRange->minVer, pRange->maxVer);
streamSetParamForScanHistoryData(pTask);
streamSetParamForScanHistory(pTask);
streamSetParamForStreamScannerStep1(pTask, pRange, &pTask->dataRange.window);
int32_t code = streamStartRecoverTask(pTask, 0);
@ -72,8 +72,8 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
}
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
streamSetStatusNormal(pTask);
streamSetParamForScanHistoryData(pTask);
streamAggRecoverPrepare(pTask);
streamSetParamForScanHistory(pTask);
streamAggScanHistoryPrepare(pTask);
} else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
streamSetStatusNormal(pTask);
qDebug("s-task:%s sink task convert to normal immediately", pTask->id.idStr);
@ -202,10 +202,10 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
pTask->checkReqIds = NULL;
if (pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
qDebug("s-task:%s all %d downstream tasks are ready, now enter into scan-history-data stage, status:%s", id, numOfReqs,
streamGetTaskStatusStr(pTask->status.taskStatus));
qDebug("s-task:%s all %d downstream tasks are ready, now enter into scan-history-data stage, status:%s", id,
numOfReqs, streamGetTaskStatusStr(pTask->status.taskStatus));
streamTaskLaunchScanHistory(pTask);
} else {
} else { // todo add assert, agg tasks?
ASSERT(pTask->status.taskStatus == TASK_STATUS__NORMAL);
qDebug("s-task:%s fixed downstream task is ready, now ready for data from wal, status:%s", id,
streamGetTaskStatusStr(pTask->status.taskStatus));
@ -215,7 +215,8 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
qDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
}
} else if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
} else {
ASSERT(pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH);
if (pRsp->reqId != pTask->checkReqId) {
return -1;
}
@ -233,8 +234,6 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
qDebug("s-task:%s fixed downstream task is ready, ready for data from inputQ, status:%s", id,
streamGetTaskStatusStr(pTask->status.taskStatus));
}
} else {
ASSERT(0);
}
} else { // not ready, wait for 100ms and retry
qDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, wait for 100ms and retry", id, pRsp->downstreamTaskId,
@ -248,7 +247,7 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
}
// common
int32_t streamSetParamForScanHistoryData(SStreamTask* pTask) {
int32_t streamSetParamForScanHistory(SStreamTask* pTask) {
qDebug("s-task:%s set operator option for scan-history-data", pTask->id.idStr);
return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor);
}
@ -286,7 +285,7 @@ int32_t streamSourceScanHistoryData(SStreamTask* pTask) {
}
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {
SStreamRecoverFinishReq req = { .streamId = pTask->id.streamId, .childId = pTask->info.selfChildId };
SStreamScanHistoryFinishReq req = { .streamId = pTask->id.streamId, .childId = pTask->info.selfChildId };
// serialize
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
@ -314,7 +313,7 @@ static int32_t doDispatchTransferMsg(SStreamTask* pTask, const SStreamTransferRe
SRpcMsg msg = {0};
int32_t tlen;
tEncodeSize(tEncodeStreamRecoverFinishReq, pReq, tlen, code);
tEncodeSize(tEncodeStreamScanHistoryFinishReq, pReq, tlen, code);
if (code < 0) {
return -1;
}
@ -330,7 +329,7 @@ static int32_t doDispatchTransferMsg(SStreamTask* pTask, const SStreamTransferRe
SEncoder encoder;
tEncoderInit(&encoder, abuf, tlen);
if ((code = tEncodeStreamRecoverFinishReq(&encoder, pReq)) < 0) {
if ((code = tEncodeStreamScanHistoryFinishReq(&encoder, pReq)) < 0) {
if (buf) {
rpcFreeCont(buf);
}
@ -375,7 +374,7 @@ int32_t streamDispatchTransferStateMsg(SStreamTask* pTask) {
}
// agg
int32_t streamAggRecoverPrepare(SStreamTask* pTask) {
int32_t streamAggScanHistoryPrepare(SStreamTask* pTask) {
pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamEpInfoList);
qDebug("s-task:%s agg task is ready and wait for %d upstream tasks complete scan-history procedure", pTask->id.idStr,
pTask->numOfWaitingUpstream);
@ -391,19 +390,19 @@ int32_t streamAggUpstreamScanHistoryFinish(SStreamTask* pTask) {
if (qStreamRecoverFinish(exec) < 0) {
return -1;
}
// streamSetStatusNormal(pTask);
return 0;
}
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t taskId, int32_t childId) {
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, int32_t taskId, int32_t childId) {
if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
int32_t left = atomic_sub_fetch_32(&pTask->numOfWaitingUpstream, 1);
ASSERT(left >= 0);
if (left == 0) {
int32_t numOfTasks = taosArrayGetSize(pTask->pUpstreamEpInfoList);
qDebug("s-task:%s all %d upstream tasks finish scan-history data", pTask->id.idStr, numOfTasks);
qDebug("s-task:%s all %d upstream tasks finish scan-history data, set param for agg task for stream data",
pTask->id.idStr, numOfTasks);
streamAggUpstreamScanHistoryFinish(pTask);
} else {
qDebug("s-task:%s receive scan-history data finish msg from upstream:0x%x(index:%d), unfinished:%d",
@ -411,6 +410,7 @@ int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t taskId, int32_
}
}
return 0;
}
@ -467,8 +467,8 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
if (pHTask == NULL && (!streamTaskShouldStop(&pTask->status))) {
const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
qWarn(
"s-task:%s vgId:%d status:%s failed to launch history task:0x%x, since it may not be built, or have been "
"destroyed, or should stop exec",
"s-task:%s vgId:%d status:%s failed to launch history task:0x%x, since it may not be built, or may have been "
"destroyed, or should stop",
pTask->id.idStr, pMeta->vgId, pStatus, pTask->historyTaskId.taskId);
taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
@ -493,7 +493,7 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
// todo fix the bug: 2. race condition
// a fill-history task needs to be started.
int32_t streamCheckHistoryTaskDownstrem(SStreamTask* pTask) {
int32_t streamCheckHistoryTaskDownstream(SStreamTask* pTask) {
SStreamMeta* pMeta = pTask->pMeta;
int32_t hTaskId = pTask->historyTaskId.taskId;
@ -573,6 +573,27 @@ int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask) {
return qStreamRecoverSetAllStepFinished(exec);
}
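// Derive the WAL version range for the secondary (step 2) history scan: if no new data
// arrived while the related stream task was halted, mark both steps finished and skip step 2.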
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask) {
SVersionRange* pRange = &pTask->dataRange.range;
int64_t latestVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
ASSERT(latestVer >= pRange->maxVer);
int64_t nextStartVer = pRange->maxVer + 1;
if (nextStartVer > latestVer - 1) {
// no input data yet. no need to execute the secondary scan while the stream task is halted
streamTaskRecoverSetAllStepFinished(pTask);
qDebug(
"s-task:%s no need to perform secondary scan-history-data(step 2), since no data ingest during secondary scan",
pTask->id.idStr);
} else {
// 2. do secondary scan of the history data, the time window remains, and the version range is updated to
// [pTask->dataRange.range.maxVer, ver1]
pRange->minVer = nextStartVer;
pRange->maxVer = latestVer - 1;
}
}
int32_t tEncodeStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->reqId) < 0) return -1;
@ -627,7 +648,7 @@ int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp)
return 0;
}
int32_t tEncodeStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq) {
int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
@ -635,7 +656,7 @@ int32_t tEncodeStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFi
tEndEncode(pEncoder);
return pEncoder->pos;
}
int32_t tDecodeStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq) {
int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
@ -652,7 +673,7 @@ void streamPrepareNdoCheckDownstream(SStreamTask* pTask) {
// calculate the correct start time window, and start the handle the history data for the main task.
if (pTask->historyTaskId.taskId != 0) {
// check downstream tasks for associated scan-history-data tasks
streamCheckHistoryTaskDownstrem(pTask);
streamCheckHistoryTaskDownstream(pTask);
// launch current task
SHistDataRange* pRange = &pTask->dataRange;

View File

@ -18,7 +18,7 @@
#include "osMemory.h"
#include "rocksdb/c.h"
#include "streamBackendRocksdb.h"
#include "streamInc.h"
#include "streamInt.h"
#include "tcoding.h"
#include "tcommon.h"
#include "tcompare.h"

View File

@ -233,7 +233,11 @@ int tdbBtreeDelete(SBTree *pBt, const void *pKey, int kLen, TXN *pTxn) {
int ret;
tdbBtcOpen(&btc, pBt, pTxn);
/*
btc.coder.ofps = taosArrayInit(8, sizeof(SPage *));
// btc.coder.ofps = taosArrayInit(8, sizeof(SPgno));
//pBtc->coder.ofps = taosArrayInit(8, sizeof(SPage *));
*/
tdbTrace("tdb delete, btc: %p, pTxn: %p", &btc, pTxn);
// move the cursor
@ -254,7 +258,18 @@ int tdbBtreeDelete(SBTree *pBt, const void *pKey, int kLen, TXN *pTxn) {
tdbBtcClose(&btc);
return -1;
}
/*
SArray *ofps = btc.coder.ofps;
if (ofps) {
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
tdbPagerInsertFreePage(btc.pBt->pPager, ofp, btc.pTxn);
}
taosArrayDestroy(ofps);
btc.coder.ofps = NULL;
}
*/
tdbBtcClose(&btc);
return 0;
}
@ -563,6 +578,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
}
// copy the parent key out if child pages are not leaf page
// childNotLeaf = !(TDB_BTREE_PAGE_IS_LEAF(pOlds[0]) || TDB_BTREE_PAGE_IS_OVFL(pOlds[0]));
childNotLeaf = !TDB_BTREE_PAGE_IS_LEAF(pOlds[0]);
if (childNotLeaf) {
for (int i = 0; i < nOlds; i++) {
@ -592,7 +608,30 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
for (int i = 0; i < nOlds; i++) {
nCells = TDB_PAGE_TOTAL_CELLS(pParent);
if (sIdx < nCells) {
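// When the child pages are leaves, the divider cell dropped from the parent may spill into
// overflow pages; collect them in the pager's ofps array (created here if needed) so they
// can be handed to the free-page list after the drop.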
bool destroyOfps = false;
if (!childNotLeaf) {
if (!pParent->pPager->ofps) {
pParent->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
destroyOfps = true;
}
}
tdbPageDropCell(pParent, sIdx, pTxn, pBt);
if (!childNotLeaf) {
SArray *ofps = pParent->pPager->ofps;
if (ofps) {
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
tdbPagerInsertFreePage(pParent->pPager, ofp, pTxn);
}
if (destroyOfps) {
taosArrayDestroy(ofps);
pParent->pPager->ofps = NULL;
}
}
}
} else {
((SIntHdr *)pParent->pData)->pgno = 0;
}
@ -861,6 +900,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
if (!TDB_BTREE_PAGE_IS_LEAF(pNews[0])) {
((SIntHdr *)(pParent->pData))->pgno = ((SIntHdr *)(pNews[0]->pData))->pgno;
}
tdbPagerInsertFreePage(pBt->pPager, pNews[0], pTxn);
}
for (int i = 0; i < 3; i++) {
@ -870,6 +911,9 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
if (pageIdx >= nNews) {
tdbPagerInsertFreePage(pBt->pPager, pOlds[pageIdx], pTxn);
}
tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
}
for (; pageIdx < nNews; ++pageIdx) {
@ -1311,7 +1355,11 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
if (ret < 0) {
return -1;
}
/*
if (pDecoder->ofps) {
taosArrayPush(pDecoder->ofps, &ofp);
}
*/
ofpCell = tdbPageGetCell(ofp, 0);
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
@ -1346,11 +1394,17 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
int lastKeyPageSpace = 0;
// load left key & val to ovpages
while (pgno != 0) {
tdbTrace("tdb decode-ofp, pTxn: %p, pgno:%u by cell:%p", pTxn, pgno, pCell);
// printf("tdb decode-ofp, pTxn: %p, pgno:%u by cell:%p\n", pTxn, pgno, pCell);
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
/*
if (pDecoder->ofps) {
taosArrayPush(pDecoder->ofps, &ofp);
}
*/
ofpCell = tdbPageGetCell(ofp, 0);
int lastKeyPage = 0;
@ -1518,8 +1572,8 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
if (pPage->vLen == TDB_VARIANT_LEN) {
if (!leaf) {
tdbError("tdb/btree-cell-size: not a leaf page.");
return -1;
tdbError("tdb/btree-cell-size: not a leaf page:%p, pgno:%" PRIu32 ".", pPage, TDB_PAGE_PGNO(pPage));
// return -1;
}
nHeader += tdbGetVarInt(pCell + nHeader, &vLen);
} else if (leaf) {
@ -1559,8 +1613,27 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
bytes = ofp->maxLocal - sizeof(SPgno);
}
// SPgno origPgno = pgno;
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
ret = tdbPagerWrite(pBt->pPager, ofp);
if (ret < 0) {
tdbError("failed to write page since %s", terrstr());
return -1;
}
/*
tdbPageDropCell(ofp, 0, pTxn, pBt);
*/
// SIntHdr *pIntHdr = (SIntHdr *)(ofp->pData);
// pIntHdr->flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL);
// pIntHdr->pgno = 0;
// ofp->pPager = NULL;
SArray *ofps = pPage->pPager->ofps;
if (ofps) {
taosArrayPush(ofps, &ofp);
}
tdbPagerReturnPage(pPage->pPager, ofp, pTxn);
nLeft -= bytes;
@ -1980,6 +2053,11 @@ static int tdbBtcMoveDownward(SBTC *pBtc) {
return -1;
}
if (TDB_BTREE_PAGE_IS_OVFL(pBtc->pPage)) {
tdbError("tdb/btc-move-downward: should not be a ovfl page here.");
return -1;
}
if (pBtc->idx < TDB_PAGE_TOTAL_CELLS(pBtc->pPage)) {
pCell = tdbPageGetCell(pBtc->pPage, pBtc->idx);
pgno = ((SPgno *)pCell)[0];
@ -2068,8 +2146,27 @@ int tdbBtcDelete(SBTC *pBtc) {
return -1;
}
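// Deleting the cell may release overflow pages; collect them via the pager's ofps array and
// insert them into the free-page table, destroying the array only if it was created here.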
bool destroyOfps = false;
if (!pBtc->pPage->pPager->ofps) {
pBtc->pPage->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
destroyOfps = true;
}
tdbPageDropCell(pBtc->pPage, idx, pBtc->pTxn, pBtc->pBt);
SArray *ofps = pBtc->pPage->pPager->ofps;
if (ofps) {
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn);
}
if (destroyOfps) {
taosArrayDestroy(ofps);
pBtc->pPage->pPager->ofps = NULL;
}
}
// update interior page or do balance
if (idx == nCells - 1) {
if (idx) {
@ -2113,6 +2210,8 @@ int tdbBtcDelete(SBTC *pBtc) {
return -1;
}
// printf("tdb/btc-delete: btree balance delete pgno: %d.\n", TDB_PAGE_PGNO(pBtc->pPage));
ret = tdbBtreeBalance(pBtc);
if (ret < 0) {
tdbError("tdb/btc-delete: btree balance failed with ret: %d.", ret);
@ -2181,7 +2280,13 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
tdbError("tdb/btc-upsert: page insert/update cell failed with ret: %d.", ret);
return -1;
}
/*
bool destroyOfps = false;
if (!pBtc->pPage->pPager->ofps) {
pBtc->pPage->pPager->ofps = taosArrayInit(8, sizeof(SPage *));
destroyOfps = true;
}
*/
// check balance
if (pBtc->pPage->nOverflow > 0) {
ret = tdbBtreeBalance(pBtc);
@ -2190,7 +2295,20 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
return -1;
}
}
/*
SArray *ofps = pBtc->pPage->pPager->ofps;
if (ofps) {
for (int i = 0; i < TARRAY_SIZE(ofps); ++i) {
SPage *ofp = *(SPage **)taosArrayGet(ofps, i);
tdbPagerInsertFreePage(pBtc->pPage->pPager, ofp, pBtc->pTxn);
}
if (destroyOfps) {
taosArrayDestroy(ofps);
pBtc->pPage->pPager->ofps = NULL;
}
}
*/
return 0;
}

View File

@ -70,6 +70,11 @@ int32_t tdbOpen(const char *dbname, int32_t szPage, int32_t pages, TDB **ppDb, i
if (ret < 0) {
return -1;
}
ret = tdbTbOpen(TDB_FREEDB_NAME, sizeof(SPgno), 0, NULL, pDb, &pDb->pFreeDb, rollback);
if (ret < 0) {
return -1;
}
#endif
*ppDb = pDb;
@ -82,6 +87,7 @@ int tdbClose(TDB *pDb) {
if (pDb) {
#ifdef USE_MAINDB
if (pDb->pMainDb) tdbTbClose(pDb->pMainDb);
if (pDb->pFreeDb) tdbTbClose(pDb->pFreeDb);
#endif
for (pPager = pDb->pgrList; pPager; pPager = pDb->pgrList) {

View File

@ -292,7 +292,23 @@ int tdbPagerBegin(SPager *pPager, TXN *pTxn) {
*/
return 0;
}
/*
int tdbPagerCancelDirty(SPager *pPager, SPage *pPage, TXN *pTxn) {
SRBTreeNode *pNode = tRBTreeGet(&pPager->rbt, (SRBTreeNode *)pPage);
if (pNode) {
pPage->isDirty = 0;
tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage);
if (pTxn->jPageSet) {
hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage)));
}
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
}
return 0;
}
*/
int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
SPage *pPage;
int ret;
@ -338,10 +354,13 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
if (pTxn->jPageSet) {
hashset_remove(pTxn->jPageSet, (void *)((long)TDB_PAGE_PGNO(pPage)));
}
tdbTrace("tdb/pager-commit: remove page: %p %d from dirty tree: %p", pPage, TDB_PAGE_PGNO(pPage), &pPager->rbt);
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
}
tdbTrace("pager/commit reset dirty tree: %p", &pPager->rbt);
tdbTrace("tdb/pager-commit reset dirty tree: %p", &pPager->rbt);
tRBTreeCreate(&pPager->rbt, pageCmpFn);
// sync the db file
@ -629,6 +648,8 @@ int tdbPagerFlushPage(SPager *pPager, TXN *pTxn) {
return 0;
}
static int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno, TXN *pTxn);
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
TXN *pTxn) {
SPage *pPage;
@ -643,7 +664,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPa
// alloc new page
if (pgno == 0) {
loadPage = 0;
ret = tdbPagerAllocPage(pPager, &pgno);
ret = tdbPagerAllocPage(pPager, &pgno, pTxn);
if (ret < 0) {
tdbError("tdb/pager: %p, ret: %d pgno: %" PRIu32 ", alloc page failed.", pPager, ret, pgno);
return -1;
@ -695,23 +716,86 @@ void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn) {
// TDB_PAGE_PGNO(pPage), pPage);
}
static int tdbPagerAllocFreePage(SPager *pPager, SPgno *ppgno) {
// TODO: Allocate a page from the free list
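// Record a freed page in the free-page table (TDB_FREEDB_NAME), keyed by its page number,
// so that later allocations can recycle it instead of growing the database file.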
int tdbPagerInsertFreePage(SPager *pPager, SPage *pPage, TXN *pTxn) {
int code = 0;
SPgno pgno = TDB_PAGE_PGNO(pPage);
// memset(pPage->pData, 0, pPage->pageSize);
tdbTrace("tdb/insert-free-page: tbc recycle page: %d.", pgno);
// printf("tdb/insert-free-page: tbc recycle page: %d.\n", pgno);
code = tdbTbInsert(pPager->pEnv->pFreeDb, &pgno, sizeof(pgno), NULL, 0, pTxn);
if (code < 0) {
tdbError("tdb/insert-free-page: tb insert failed with ret: %d.", code);
return -1;
}
pPage->pPager = NULL;
return code;
}
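// Pop the first page number from the free-page table, if any; *pPgno is left untouched when
// the table is empty or unavailable, and the caller then falls back to allocating a new page.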
static int tdbPagerRemoveFreePage(SPager *pPager, SPgno *pPgno, TXN *pTxn) {
int code = 0;
TBC *pCur;
if (!pPager->pEnv->pFreeDb) {
return 0;
}
code = tdbTbcOpen(pPager->pEnv->pFreeDb, &pCur, pTxn);
if (code < 0) {
return 0;
}
code = tdbTbcMoveToFirst(pCur);
if (code) {
tdbError("tdb/remove-free-page: moveto first failed with ret: %d.", code);
tdbTbcClose(pCur);
return 0;
}
void *pKey = NULL;
int nKey = 0;
code = tdbTbcGet(pCur, (const void **)&pKey, &nKey, NULL, NULL);
if (code < 0) {
// tdbError("tdb/remove-free-page: tbc get failed with ret: %d.", code);
tdbTbcClose(pCur);
return 0;
}
*pPgno = *(SPgno *)pKey;
tdbTrace("tdb/remove-free-page: tbc get page: %d.", *pPgno);
// printf("tdb/remove-free-page: tbc get page: %d.\n", *pPgno);
code = tdbTbcDelete(pCur);
if (code < 0) {
tdbError("tdb/remove-free-page: tbc delete failed with ret: %d.", code);
tdbTbcClose(pCur);
return 0;
}
tdbTbcClose(pCur);
return 0;
}
static int tdbPagerAllocFreePage(SPager *pPager, SPgno *ppgno, TXN *pTxn) {
// Allocate a page from the free list
return tdbPagerRemoveFreePage(pPager, ppgno, pTxn);
}
static int tdbPagerAllocNewPage(SPager *pPager, SPgno *ppgno) {
*ppgno = ++pPager->dbFileSize;
// tdbError("tdb/alloc-new-page: %d.", *ppgno);
return 0;
}
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno) {
static int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno, TXN *pTxn) {
int ret;
*ppgno = 0;
// Try to allocate from the free list of the pager
ret = tdbPagerAllocFreePage(pPager, ppgno);
ret = tdbPagerAllocFreePage(pPager, ppgno, pTxn);
if (ret < 0) {
return -1;
}

View File

@ -138,6 +138,7 @@ typedef struct {
SPgno pgno;
u8 *pBuf;
u8 freeKV;
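// overflow pages gathered while decoding a cell payload (collection is currently disabled)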
SArray *ofps;
} SCellDecoder;
struct SBTC {
@ -198,7 +199,8 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn);
int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initPage)(SPage *, void *, int), void *arg,
TXN *pTxn);
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn);
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
int tdbPagerInsertFreePage(SPager *pPager, SPage *pPage, TXN *pTxn);
// int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
int tdbPagerRestoreJournals(SPager *pPager);
int tdbPagerRollback(SPager *pPager);
@ -373,6 +375,7 @@ static inline SCell *tdbPageGetCell(SPage *pPage, int idx) {
#ifdef USE_MAINDB
#define TDB_MAINDB_NAME "main.tdb"
#define TDB_FREEDB_NAME "_free.db"
#endif
struct STDB {
@ -386,6 +389,7 @@ struct STDB {
SPager **pgrHash;
#ifdef USE_MAINDB
TTB *pMainDb;
TTB *pFreeDb;
#endif
int64_t txnId;
};
@ -403,6 +407,7 @@ struct SPager {
SRBTree rbt;
// u8 inTran;
TXN *pActiveTxn;
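// overflow pages freed during the current operation, pending insertion into the free-page table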
SArray *ofps;
SPager *pNext; // used by TDB
SPager *pHashNext; // used by TDB
#ifdef USE_MAINDB

View File

@ -14,3 +14,7 @@ target_link_libraries(tdbExOVFLTest tdb gtest gtest_main)
add_executable(tdbPageDefragmentTest "tdbPageDefragmentTest.cpp")
target_link_libraries(tdbPageDefragmentTest tdb gtest gtest_main)
# page recycling testing
add_executable(tdbPageRecycleTest "tdbPageRecycleTest.cpp")
target_link_libraries(tdbPageRecycleTest tdb gtest gtest_main)

View File

@ -190,6 +190,15 @@ static void insertOfp(void) {
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
// TEST(TdbOVFLPagesTest, DISABLED_TbInsertTest) {
@ -233,6 +242,13 @@ TEST(TdbOVFLPagesTest, TbGetTest) {
tdbFree(pVal);
}
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
// TEST(TdbOVFLPagesTest, DISABLED_TbDeleteTest) {
@ -334,6 +350,15 @@ tdbBegin(pEnv, &txn);
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
// TEST(tdb_test, DISABLED_simple_insert1) {
@ -407,6 +432,8 @@ TEST(tdb_test, simple_insert1) {
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
{ // Query the data
void *pVal = NULL;
int vLen;

View File

@ -0,0 +1,835 @@
#include <gtest/gtest.h>
#define ALLOW_FORBID_FUNC
#include "os.h"
#include "tdb.h"
#include <shared_mutex>
#include <string>
#include <thread>
#include <vector>
#include "tlog.h"
typedef struct SPoolMem {
int64_t size;
struct SPoolMem *prev;
struct SPoolMem *next;
} SPoolMem;
static SPoolMem *openPool() {
SPoolMem *pPool = (SPoolMem *)taosMemoryMalloc(sizeof(*pPool));
pPool->prev = pPool->next = pPool;
pPool->size = 0;
return pPool;
}
static void clearPool(SPoolMem *pPool) {
SPoolMem *pMem;
do {
pMem = pPool->next;
if (pMem == pPool) break;
pMem->next->prev = pMem->prev;
pMem->prev->next = pMem->next;
pPool->size -= pMem->size;
taosMemoryFree(pMem);
} while (1);
assert(pPool->size == 0);
}
static void closePool(SPoolMem *pPool) {
clearPool(pPool);
taosMemoryFree(pPool);
}
static void *poolMalloc(void *arg, size_t size) {
void *ptr = NULL;
SPoolMem *pPool = (SPoolMem *)arg;
SPoolMem *pMem;
pMem = (SPoolMem *)taosMemoryMalloc(sizeof(*pMem) + size);
if (pMem == NULL) {
assert(0);
}
pMem->size = sizeof(*pMem) + size;
pMem->next = pPool->next;
pMem->prev = pPool;
pPool->next->prev = pMem;
pPool->next = pMem;
pPool->size += pMem->size;
ptr = (void *)(&pMem[1]);
return ptr;
}
static void poolFree(void *arg, void *ptr) {
SPoolMem *pPool = (SPoolMem *)arg;
SPoolMem *pMem;
pMem = &(((SPoolMem *)ptr)[-1]);
pMem->next->prev = pMem->prev;
pMem->prev->next = pMem->next;
pPool->size -= pMem->size;
taosMemoryFree(pMem);
}
static int tKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) {
int k1, k2;
std::string s1((char *)pKey1 + 3, kLen1 - 3);
std::string s2((char *)pKey2 + 3, kLen2 - 3);
k1 = stoi(s1);
k2 = stoi(s2);
if (k1 < k2) {
return -1;
} else if (k1 > k2) {
return 1;
} else {
return 0;
}
}
static int tDefaultKeyCmpr(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2) {
int mlen;
int cret;
ASSERT(keyLen1 > 0 && keyLen2 > 0 && pKey1 != NULL && pKey2 != NULL);
mlen = keyLen1 < keyLen2 ? keyLen1 : keyLen2;
cret = memcmp(pKey1, pKey2, mlen);
if (cret == 0) {
if (keyLen1 < keyLen2) {
cret = -1;
} else if (keyLen1 > keyLen2) {
cret = 1;
} else {
cret = 0;
}
}
return cret;
}
static TDB *openEnv(char const *envName, int const pageSize, int const pageNum) {
TDB *pEnv = NULL;
int ret = tdbOpen(envName, pageSize, pageNum, &pEnv, 0);
if (ret) {
pEnv = NULL;
}
return pEnv;
}
static void generateBigVal(char *val, int valLen) {
for (int i = 0; i < valLen; ++i) {
char c = char(i & 0xff);
if (c == 0) {
c = 1;
}
val[i] = c;
}
}
static void insertOfp(void) {
int ret = 0;
// open Env
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = tKeyCmpr;
// ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// open the pool
SPoolMem *pPool = openPool();
// start a transaction
TXN *txn = NULL;
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
// generate value payload
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
char val[32605];
int valLen = sizeof(val) / sizeof(val[0]);
generateBigVal(val, valLen);
// insert the generated big data
// char const *key = "key1";
char const *key = "key123456789";
ret = tdbTbInsert(pDb, key, strlen(key) + 1, val, valLen, txn);
GTEST_ASSERT_EQ(ret, 0);
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
static void clearDb(char const *db) { taosRemoveDir(db); }
TEST(TdbPageRecycleTest, DISABLED_TbInsertTest) {
// TEST(TdbPageRecycleTest, TbInsertTest) {
// ofp inserting
clearDb("tdb");
insertOfp();
}
TEST(TdbPageRecycleTest, DISABLED_TbGetTest) {
// TEST(TdbPageRecycleTest, TbGetTest) {
clearDb("tdb");
insertOfp();
// open Env
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = tKeyCmpr;
// int ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
int ret = tdbTbOpen("ofp_insert.db", 12, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// generate value payload
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
char val[32605];
int valLen = sizeof(val) / sizeof(val[0]);
generateBigVal(val, valLen);
{ // Query the data
void *pVal = NULL;
int vLen;
// char const *key = "key1";
char const *key = "key123456789";
ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
ASSERT(ret == 0);
GTEST_ASSERT_EQ(ret, 0);
GTEST_ASSERT_EQ(vLen, valLen);
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
tdbFree(pVal);
}
}
TEST(TdbPageRecycleTest, DISABLED_TbDeleteTest) {
// TEST(TdbPageRecycleTest, TbDeleteTest) {
int ret = 0;
taosRemoveDir("tdb");
// open Env
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = tKeyCmpr;
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// open the pool
SPoolMem *pPool = openPool();
// start a transaction
TXN *txn;
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
// generate value payload
// char val[((4083 - 4 - 3 - 2) + 1) * 100]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
char val[((4083 - 4 - 3 - 2) + 1) * 2]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
int valLen = sizeof(val) / sizeof(val[0]);
generateBigVal(val, valLen);
{ // insert the generated big data
ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
GTEST_ASSERT_EQ(ret, 0);
}
{ // query the data
void *pVal = NULL;
int vLen;
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
ASSERT(ret == 0);
GTEST_ASSERT_EQ(ret, 0);
GTEST_ASSERT_EQ(vLen, valLen);
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
tdbFree(pVal);
}
/* open to debug committed file
tdbCommit(pEnv, &txn);
tdbTxnClose(&txn);
++txnid;
tdbTxnOpen(&txn, txnid, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
tdbBegin(pEnv, &txn);
*/
{ // upsert the data
ret = tdbTbUpsert(pDb, "key1", strlen("key1"), "value1", strlen("value1"), txn);
GTEST_ASSERT_EQ(ret, 0);
}
{ // query the upserted data
void *pVal = NULL;
int vLen;
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
ASSERT(ret == 0);
GTEST_ASSERT_EQ(ret, 0);
GTEST_ASSERT_EQ(vLen, strlen("value1"));
GTEST_ASSERT_EQ(memcmp("value1", pVal, vLen), 0);
tdbFree(pVal);
}
{ // delete the data
ret = tdbTbDelete(pDb, "key1", strlen("key1"), txn);
GTEST_ASSERT_EQ(ret, 0);
}
{ // query the deleted data
void *pVal = NULL;
int vLen = -1;
ret = tdbTbGet(pDb, "key1", strlen("key1"), &pVal, &vLen);
ASSERT(ret == -1);
GTEST_ASSERT_EQ(ret, -1);
GTEST_ASSERT_EQ(vLen, -1);
GTEST_ASSERT_EQ(pVal, nullptr);
tdbFree(pVal);
}
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
}
TEST(TdbPageRecycleTest, DISABLED_simple_insert1) {
// TEST(TdbPageRecycleTest, simple_insert1) {
int ret;
TDB *pEnv;
TTB *pDb;
tdb_cmpr_fn_t compFunc;
int nData = 1;
TXN *txn;
int const pageSize = 4096;
taosRemoveDir("tdb");
// Open Env
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
GTEST_ASSERT_EQ(ret, 0);
// Create a database
compFunc = tKeyCmpr;
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
{
char key[64];
// char val[(4083 - 4 - 3 - 2)]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
int64_t poolLimit = 4096; // 4KB pool limit
SPoolMem *pPool;
// open the pool
pPool = openPool();
// start a transaction
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
for (int iData = 1; iData <= nData; iData++) {
sprintf(key, "key0");
sprintf(val, "value%d", iData);
// ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), &txn);
// GTEST_ASSERT_EQ(ret, 0);
// generate value payload
int valLen = sizeof(val) / sizeof(val[0]);
for (int i = 6; i < valLen; ++i) {
char c = char(i & 0xff);
if (c == 0) {
c = 1;
}
val[i] = c;
}
ret = tdbTbInsert(pDb, "key1", strlen("key1"), val, valLen, txn);
GTEST_ASSERT_EQ(ret, 0);
// if pool is full, commit the transaction and start a new one
if (pPool->size >= poolLimit) {
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
// start a new transaction
clearPool(pPool);
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
}
// commit the transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
{ // Query the data
void *pVal = NULL;
int vLen;
for (int i = 1; i <= nData; i++) {
sprintf(key, "key%d", i);
// sprintf(val, "value%d", i);
ret = tdbTbGet(pDb, key, strlen(key), &pVal, &vLen);
ASSERT(ret == 0);
GTEST_ASSERT_EQ(ret, 0);
GTEST_ASSERT_EQ(vLen, sizeof(val) / sizeof(val[0]));
GTEST_ASSERT_EQ(memcmp(val, pVal, vLen), 0);
}
tdbFree(pVal);
}
{ // Iterate to query the DB data
TBC *pDBC;
void *pKey = NULL;
void *pVal = NULL;
int vLen, kLen;
int count = 0;
ret = tdbTbcOpen(pDb, &pDBC, NULL);
GTEST_ASSERT_EQ(ret, 0);
tdbTbcMoveToFirst(pDBC);
for (;;) {
ret = tdbTbcNext(pDBC, &pKey, &kLen, &pVal, &vLen);
if (ret < 0) break;
// std::cout.write((char *)pKey, kLen) /* << " " << kLen */ << " ";
// std::cout.write((char *)pVal, vLen) /* << " " << vLen */;
// std::cout << std::endl;
count++;
}
GTEST_ASSERT_EQ(count, nData);
tdbTbcClose(pDBC);
tdbFree(pKey);
tdbFree(pVal);
}
}
ret = tdbTbDrop(pDb);
GTEST_ASSERT_EQ(ret, 0);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
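// Helper: insert nData small key/value records, committing and restarting the transaction
// whenever the write pool exceeds poolLimit.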
static void insertDb(int nData) {
int ret = 0;
TDB *pEnv = NULL;
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc;
TXN *txn = NULL;
int const pageSize = 4 * 1024;
// Open Env
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
GTEST_ASSERT_EQ(ret, 0);
// Create a database
compFunc = tKeyCmpr;
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// 1. insert nData key/value records
{
char key[64];
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
int64_t poolLimit = 4096; // 4KB pool limit
SPoolMem *pPool;
// open the pool
pPool = openPool();
// start a transaction
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
for (int iData = 0; iData < nData; ++iData) {
sprintf(key, "key%03d", iData);
sprintf(val, "value%03d", iData);
ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
GTEST_ASSERT_EQ(ret, 0);
// if pool is full, commit the transaction and start a new one
if (pPool->size >= poolLimit) {
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
// start a new transaction
clearPool(pPool);
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
}
// commit the transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
}
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
system("ls -l ./tdb");
}
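// Helper: delete the records written by insertDb, committing in batches as the pool fills;
// the freed pages should end up in the free-page table for later recycling.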
static void deleteDb(int nData) {
int ret = 0;
TDB *pEnv = NULL;
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc;
TXN *txn = NULL;
int const pageSize = 4 * 1024;
// Open Env
ret = tdbOpen("tdb", pageSize, 64, &pEnv, 0);
GTEST_ASSERT_EQ(ret, 0);
// Create a database
compFunc = tKeyCmpr;
ret = tdbTbOpen("db.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// 2. delete the previously inserted records
{
char key[64];
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
int64_t poolLimit = 4096; // 4KB pool limit
SPoolMem *pPool;
// open the pool
pPool = openPool();
// start a transaction
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
for (int iData = 0; iData < nData; iData++) {
// if (iData % 2 == 0) continue;
sprintf(key, "key%03d", iData);
sprintf(val, "value%03d", iData);
{ // delete the data
ret = tdbTbDelete(pDb, key, strlen(key), txn);
GTEST_ASSERT_EQ(ret, 0);
}
// if pool is full, commit the transaction and start a new one
if (pPool->size >= poolLimit) {
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
// start a new transaction
clearPool(pPool);
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
}
// commit the transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
}
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
system("ls -l ./tdb");
}
static const int nDataConst = 256 * 19;
// TEST(TdbPageRecycleTest, DISABLED_seq_insert) {
TEST(TdbPageRecycleTest, seq_insert) {
clearDb("tdb");
insertDb(nDataConst);
}
// TEST(TdbPageRecycleTest, DISABLED_seq_delete) {
TEST(TdbPageRecycleTest, seq_delete) { deleteDb(nDataConst); }
// TEST(TdbPageRecycleTest, DISABLED_recycly_insert) {
TEST(TdbPageRecycleTest, recycly_insert) { insertDb(nDataConst); }
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp) {
clearDb("tdb");
insertOfp();
system("ls -l ./tdb");
}
static void deleteOfp(void) {
// open Env
int ret = 0;
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = tKeyCmpr;
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// open the pool
SPoolMem *pPool = openPool();
// start a transaction
TXN *txn;
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
{ // delete the data
char const *key = "key123456789";
ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
GTEST_ASSERT_EQ(ret, 0);
}
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
ret = tdbTbDrop(pDb);
GTEST_ASSERT_EQ(ret, 0);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
}
// TEST(TdbPageRecycleTest, DISABLED_seq_delete_ofp) {
TEST(TdbPageRecycleTest, seq_delete_ofp) {
deleteOfp();
system("ls -l ./tdb");
}
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp_again) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp_again) {
insertOfp();
system("ls -l ./tdb");
}
// TEST(TdbPageRecycleTest, DISABLED_recycly_seq_insert_ofp_nocommit) {
TEST(TdbPageRecycleTest, recycly_seq_insert_ofp_nocommit) {
clearDb("tdb");
insertOfp();
system("ls -l ./tdb");
// open Env
int ret = 0;
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = tKeyCmpr;
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// open the pool
SPoolMem *pPool = openPool();
// start a transaction
TXN *txn;
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
{ // delete the data
char const *key = "key123456789";
ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
GTEST_ASSERT_EQ(ret, 0);
}
// 1. insert nData key/value records
{
int nData = nDataConst;
char key[64];
char val[(4083 - 4 - 3 - 2) + 1]; // pSize(4096) - amSize(1) - pageHdr(8) - footerSize(4)
int64_t poolLimit = 4096; // 4KB pool limit
for (int iData = 0; iData < nData; ++iData) {
sprintf(key, "key%03d", iData);
sprintf(val, "value%03d", iData);
ret = tdbTbInsert(pDb, key, strlen(key), val, strlen(val), txn);
GTEST_ASSERT_EQ(ret, 0);
// if pool is full, commit the transaction and start a new one
if (pPool->size >= poolLimit) {
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
// start a new transaction
clearPool(pPool);
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
}
}
}
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
system("ls -l ./tdb");
}
// TEST(TdbPageRecycleTest, DISABLED_recycly_delete_interior_ofp_nocommit) {
TEST(TdbPageRecycleTest, recycly_delete_interior_ofp_nocommit) {
clearDb("tdb");
// open Env
int ret = 0;
int const pageSize = 4096;
int const pageNum = 64;
TDB *pEnv = openEnv("tdb", pageSize, pageNum);
GTEST_ASSERT_NE(pEnv, nullptr);
// open db
TTB *pDb = NULL;
tdb_cmpr_fn_t compFunc = NULL; // tKeyCmpr;
ret = tdbTbOpen("ofp_insert.db", -1, -1, compFunc, pEnv, &pDb, 0);
GTEST_ASSERT_EQ(ret, 0);
// open the pool
SPoolMem *pPool = openPool();
// start a transaction
TXN *txn;
tdbBegin(pEnv, &txn, poolMalloc, poolFree, pPool, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
char key[1024] = {0};
int count = sizeof(key) / sizeof(key[0]);
for (int i = 0; i < count - 1; ++i) {
key[i] = 'a';
}
// insert n ofp keys to form 2-layer btree
{
for (int i = 0; i < 7; ++i) {
// sprintf(&key[count - 2], "%c", i);
key[count - 2] = '0' + i;
ret = tdbTbInsert(pDb, key, count, NULL, NULL, txn);
GTEST_ASSERT_EQ(ret, 0);
}
}
/*
// delete one interior key
{
sprintf(&key[count - 2], "%c", 2);
key[count - 2] = '0' + 2;
ret = tdbTbDelete(pDb, key, strlen(key) + 1, txn);
GTEST_ASSERT_EQ(ret, 0);
}
*/
// commit current transaction
tdbCommit(pEnv, txn);
tdbPostCommit(pEnv, txn);
closePool(pPool);
// Close a database
tdbTbClose(pDb);
// Close Env
ret = tdbClose(pEnv);
GTEST_ASSERT_EQ(ret, 0);
system("ls -l ./tdb");
}

View File

@ -568,6 +568,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SELECTED_EXPR, "Invalid SELECTed ex
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table info")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
//planner

View File

@ -129,6 +129,7 @@ sql DROP INDEX sma_index_3 ;
print ========== step8
sql drop database if exists db;
sleep 2000
sql create database db duration 300;
sql use db;
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);