Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/TD-31890-8

Hongze Cheng 2024-09-19 08:55:34 +08:00
commit 86122d81ea
22 changed files with 224 additions and 88 deletions

View File

@ -69,7 +69,7 @@ dataDir /mnt/data6 2 0
Add the following parameters for S3 access to the configuration file /etc/taos/taos.cfg:
|Parameter Name | Description |
|:-------------:|:-----------------------------------------------:|
|:-------------|:-----------------------------------------------|
|s3EndPoint | COS service endpoint for the user's region; both http and https are supported. The bucket's region must match the endpoint's region, otherwise it cannot be accessed. Example: http://cos.ap-beijing.myqcloud.com |
|s3AccessKey | Colon-separated user SecretId:SecretKey. Example: AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|s3BucketName | Bucket name; the part after the hyphen is the AppId with which the user registered the COS service. The AppId is specific to COS (AWS and Alibaba Cloud do not have it) and must be included as part of the bucket name, separated by a hyphen. All parameter values are strings and need no quotes. Example: test0711-1309024725 |
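Taken together, a minimal sketch of the resulting entries in /etc/taos/taos.cfg might look like the following (the endpoint, key, and bucket values are placeholders copied from the examples above, not working credentials):
```
s3EndPoint   http://cos.ap-beijing.myqcloud.com
s3AccessKey  AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E
s3BucketName test0711-1309024725
```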
@ -111,3 +111,27 @@ s3migrate database <db_name>;
| 1 | s3_keeplocal | 3650 | 1 | 365000 | Number of days the data is kept locally, i.e. how long a data file stays on the local disk before it may be uploaded to S3. The default unit is days; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunksize | 262144 | 131072 | 1048576 | Size threshold for uploaded objects; like the TSDB_PAGESIZE parameter, it cannot be modified. Unit: TSDB pages |
| 3 | s3_compact | 0 | 0 | 1 | Whether to run a compact operation automatically when a TSDB file group is uploaded to S3 for the first time. |
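As a hedged illustration (assuming the modifiable option above can be changed per database with the usual ALTER DATABASE syntax; s3_chunksize is not modifiable, as noted in the table), adjusting local retention and then triggering migration might look like:
```
-- assumption: s3_keeplocal can be changed per database via ALTER DATABASE
ALTER DATABASE db_name S3_KEEPLOCAL 365;
-- trigger migration of eligible data for this database (command shown in this section)
s3migrate database db_name;
```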
## Azure Blob Storage
This section describes how to use Microsoft Azure Blob object storage with TDengine Enterprise. This feature is an extension of the 'Object Storage' feature in the previous section and additionally depends on the S3 gateway provided by the Flexify service. With proper parameter configuration, most of the colder time-series data can be stored in the Azure Blob service.
### Flexify Service
Flexify is an application in the Azure Marketplace that allows S3-compatible applications to store data in Azure Blob Storage through the standard S3 API. Multiple Flexify services can be used to set up multiple S3 gateways for the same Blob storage.
For deployment instructions, see the [Flexify](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/flexify.azure-s3-api?tab=Overview) application page.
### Configuration
Add the following parameters for S3 access to the configuration file /etc/taos/taos.cfg:
```
s3EndPoint http://20.191.157.23,http://20.191.157.24,http://20.191.157.25
s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd
s3BucketName td-test
```
- Multiple entries may be configured for s3EndPoint and s3AccessKey, but the two must have the same number of entries. Entries are separated by ','. Only a single s3BucketName may be configured
- Each `{s3EndPoint, s3AccessKey}` pair is treated as one S3 service, and each S3 request randomly selects one of these services
- All S3 services are assumed to point to the same data source, so operations on the individual S3 services are fully equivalent
- If an operation fails on one S3 service, another service is tried; only after all services have failed is the last error code returned
- At most 10 S3 service configurations are supported

docs/zh/14-reference/01-components/02-taosc.md Normal file → Executable file
View File

@ -38,6 +38,7 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API并且在
|enableCoreFile | Whether to generate a core file on crash; 0: do not generate, 1: generate; default: 1 |
|enableScience | Whether to display floating-point numbers in scientific notation; 0: disabled, 1: enabled; default: 1 |
|compressMsgSize | Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default: -1 |
|queryTableNotExistAsEmpty | Whether to return an empty result set when the queried table does not exist; false: return an error; true: return an empty result set; default: false |
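For example, enabling the empty-result behavior on the client side could be a single /etc/taos/taos.cfg entry (a sketch; the parameter name and value come from the table above):
```
queryTableNotExistAsEmpty true
```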
## API

View File

@ -187,8 +187,8 @@ AllowWebSockets
- Compatible with the InfluxDB v1 write API
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- Compatible with OpenTSDB JSON and telnet format writes
- \<http://opentsdb.net/docs/build/html/api_http/put.html\>
- \<http://opentsdb.net/docs/build/html/api_telnet/put.html\>
- [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
- [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
- Seamless connection with collectd.
collectd is a system statistics collection daemon; visit [https://collectd.org/](https://collectd.org/) for more information.
- Seamless connection with StatsD.

View File

@ -194,7 +194,7 @@ Active: inactive (dead)
You can call taosKeeper's `check_health` endpoint to determine whether the service is alive; if the service is healthy, it returns an HTTP 200 status code:
```
$ curl -i http://127.0.0.1:6043/check_health
curl -i http://127.0.0.1:6043/check_health
```
Response:

View File

@ -8,7 +8,7 @@ toc_max_heading_level: 4
The PHP connector depends on the TDengine client driver.
Project repository: \<https://github.com/Yurunsoft/php-tdengine>
Project repository: [https://github.com/Yurunsoft/php-tdengine](https://github.com/Yurunsoft/php-tdengine)
After the TDengine server or client is installed, `taos.h` is located at:

View File

@ -16,7 +16,7 @@ MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/em
## Install and Start EMQX
Depending on your operating system, download the installation package from the EMQX website and install it. Download address: \<https://www.emqx.io/zh/downloads>. After installation, start the EMQX service with `sudo emqx start` or `sudo systemctl start emqx`.
Depending on your operating system, [download the installation package from the EMQX website](https://www.emqx.io/zh/downloads) and install it. After installation, start the EMQX service with `sudo emqx start` or `sudo systemctl start emqx`.
Note: This article is based on EMQX v4.4.5; for other versions, the relevant configuration UI, configuration methods, and features may differ as the version evolves.

View File

@ -924,6 +924,9 @@ int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
}
pStmt->sql.sqlStr = strndup(sql, length);
if (!pStmt->sql.sqlStr) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pStmt->sql.sqlLen = length;
pStmt->sql.stbInterlaceMode = pStmt->stbInterlaceMode;

View File

@ -869,6 +869,9 @@ int stmtPrepare2(TAOS_STMT2* stmt, const char* sql, unsigned long length) {
}
pStmt->sql.sqlStr = strndup(sql, length);
if (!pStmt->sql.sqlStr) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pStmt->sql.sqlLen = length;
pStmt->sql.stbInterlaceMode = pStmt->stbInterlaceMode;

View File

@ -360,6 +360,12 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread));
if (threads == NULL) {
dError("failed to allocate memory for threads since %s", terrstr());
taosMemoryFree(pCfgs);
return terrno;
}
for (int32_t t = 0; t < threadNum; ++t) {
threads[t].threadIndex = t;
threads[t].pMgmt = pMgmt;

View File

@ -319,7 +319,7 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
// STsdbSnapWriter ========================================
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, void* pRanges, STsdbSnapWriter** ppWriter);
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr);
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter);
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter, bool rollback);
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
// STsdbSnapRAWReader ========================================
int32_t tsdbSnapRAWReaderOpen(STsdb* pTsdb, int64_t ever, int8_t type, STsdbSnapRAWReader** ppReader);
@ -373,7 +373,7 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
// SRSmaSnapWriter ========================================
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, void** ppRanges, SRSmaSnapWriter** ppWriter);
int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter);
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter, bool rollback);
int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {

View File

@ -1036,36 +1036,6 @@ const void *metaGetTableTagVal(const void *pTag, int16_t type, STagVal *val) {
return NULL;
}
#ifdef TAG_FILTER_DEBUG
if (IS_VAR_DATA_TYPE(val->type)) {
char *buf = taosMemoryCalloc(val->nData + 1, 1);
memcpy(buf, val->pData, val->nData);
metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
taosMemoryFree(buf);
} else {
double dval = 0;
GET_TYPED_DATA(dval, double, val->type, &val->i64);
metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
}
SArray *pTagVals = NULL;
tTagToValArray((STag *)pTag, &pTagVals);
for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
memcpy(buf, pTagVal->pData, pTagVal->nData);
metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
taosMemoryFree(buf);
} else {
double dval = 0;
GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64);
metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval);
}
}
#endif
return val;
}
@ -1088,6 +1058,9 @@ int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SIdxCursor *pCursor = NULL;
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
if (pCursor == NULL) {
return terrno;
}
pCursor->pMeta = pMeta;
pCursor->suid = param->suid;
pCursor->cid = param->cid;
@ -1160,6 +1133,9 @@ int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SIdxCursor *pCursor = NULL;
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
if (pCursor == NULL) {
return terrno;
}
pCursor->pMeta = pMeta;
pCursor->suid = param->suid;
pCursor->cid = param->cid;
@ -1235,6 +1211,9 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SIdxCursor *pCursor = NULL;
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
if (pCursor == NULL) {
return terrno;
}
pCursor->pMeta = pMeta;
pCursor->suid = param->suid;
pCursor->cid = param->cid;

View File

@ -89,12 +89,15 @@ static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) {
static int metaUpdateMetaRsp(tb_uid_t uid, char *tbName, SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) {
pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema));
if (NULL == pMetaRsp->pSchemas) {
return terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
pMetaRsp->pSchemaExt = taosMemoryMalloc(pSchema->nCols * sizeof(SSchemaExt));
if (pMetaRsp->pSchemaExt == NULL) {
taosMemoryFree(pMetaRsp->pSchemas);
return terrno;
}
tstrncpy(pMetaRsp->tbName, tbName, TSDB_TABLE_NAME_LEN);
pMetaRsp->numOfColumns = pSchema->nCols;
@ -1558,7 +1561,11 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
// get table entry
SDecoder dc = {0};
entry.pBuf = taosMemoryMalloc(nData);
if ((entry.pBuf = taosMemoryMalloc(nData)) == NULL) {
(void)tdbTbcClose(pUidIdxc);
(void)tdbTbcClose(pTbDbc);
return terrno;
}
memcpy(entry.pBuf, pData, nData);
tDecoderInit(&dc, entry.pBuf, nData);
ret = metaDecodeEntry(&dc, &entry);
@ -1625,6 +1632,9 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
pSchema->version++;
pSchema->nCols++;
pNewSchema = taosMemoryMalloc(sizeof(SSchema) * pSchema->nCols);
if (pNewSchema == NULL) {
goto _err;
}
memcpy(pNewSchema, pSchema->pSchema, sizeof(SSchema) * (pSchema->nCols - 1));
pSchema->pSchema = pNewSchema;
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].bytes = pAlterTbReq->bytes;
@ -1832,7 +1842,11 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
(void)tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
ctbEntry.pBuf = taosMemoryMalloc(nData);
if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) {
(void)tdbTbcClose(pUidIdxc);
(void)tdbTbcClose(pTbDbc);
return terrno;
}
memcpy(ctbEntry.pBuf, pData, nData);
tDecoderInit(&dc1, ctbEntry.pBuf, nData);
(void)metaDecodeEntry(&dc1, &ctbEntry);
@ -2019,7 +2033,11 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
// get table entry
SDecoder dc = {0};
entry.pBuf = taosMemoryMalloc(nData);
if ((entry.pBuf = taosMemoryMalloc(nData)) == NULL) {
(void)tdbTbcClose(pUidIdxc);
(void)tdbTbcClose(pTbDbc);
return terrno;
}
memcpy(entry.pBuf, pData, nData);
tDecoderInit(&dc, entry.pBuf, nData);
ret = metaDecodeEntry(&dc, &entry);

View File

@ -163,11 +163,11 @@ _exit:
TAOS_RETURN(code);
}
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter) {
int32_t rsmaSnapWriterPrepareClose(SRSmaSnapWriter* pWriter, bool rollback) {
int32_t code = 0;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pWriter->pDataWriter[i]) {
code = tsdbSnapWriterPrepareClose(pWriter->pDataWriter[i]);
code = tsdbSnapWriterPrepareClose(pWriter->pDataWriter[i], rollback);
if (code) {
smaError("vgId:%d, failed to prepare close tsdbSnapWriter since %s. i: %d", SMA_VID(pWriter->pSma),
tstrerror(code), i);

View File

@ -924,6 +924,31 @@ _exit:
return code;
}
static int32_t tsdbSnapWriteFileSetAbort(STsdbSnapWriter* writer) {
if (!writer->ctx->fsetWriteBegin) return 0;
int32_t code = 0;
int32_t lino = 0;
// close write
code = tsdbSnapWriteFileSetCloseWriter(writer);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbSnapWriteFileSetCloseIter(writer);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbSnapWriteFileSetCloseReader(writer);
TSDB_CHECK_CODE(code, lino, _exit);
writer->ctx->fsetWriteBegin = false;
_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(writer->tsdb->pVnode), lino, code);
}
return code;
}
static int32_t tsdbSnapWriteTimeSeriesData(STsdbSnapWriter* writer, SSnapDataHdr* hdr) {
int32_t code = 0;
int32_t lino = 0;
@ -1075,15 +1100,23 @@ _exit:
return code;
}
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* writer) {
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* writer, bool rollback) {
int32_t code = 0;
int32_t lino = 0;
if (!rollback) {
code = tsdbSnapWriteFileSetEnd(writer);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbFSEditBegin(writer->tsdb->pFS, writer->fopArr, TSDB_FEDIT_COMMIT);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
code = tsdbSnapWriteFileSetAbort(writer);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbFSEditBegin(writer->tsdb->pFS, writer->fopArr, TSDB_FEDIT_COMMIT);
TSDB_CHECK_CODE(code, lino, _exit);
}
_exit:
if (code) {

View File

@ -661,7 +661,7 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
// prepare
if (pWriter->pTsdbSnapWriter) {
(void)tsdbSnapWriterPrepareClose(pWriter->pTsdbSnapWriter);
(void)tsdbSnapWriterPrepareClose(pWriter->pTsdbSnapWriter, rollback);
}
if (pWriter->pTsdbSnapRAWWriter) {
@ -669,7 +669,7 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
}
if (pWriter->pRsmaSnapWriter) {
(void)rsmaSnapWriterPrepareClose(pWriter->pRsmaSnapWriter);
(void)rsmaSnapWriterPrepareClose(pWriter->pRsmaSnapWriter, rollback);
}
// commit json

View File

@ -1412,6 +1412,10 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in
if (tsEnableAuditCreateTable) {
char *str = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
if (str == NULL) {
pRsp->code = terrno;
goto _exit;
}
strcpy(str, pDropTbReq->name);
if (taosArrayPush(tbNames, &str) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;

View File

@ -155,6 +155,10 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
SSchema* pTagSchema = &pSchema[tags->pColIndex[c]];
int32_t colLen = pTagSchema->bytes;
if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
if (!bind[c].length) {
code = buildInvalidOperationMsg(&pBuf, "var tag length is null");
goto end;
}
colLen = bind[c].length[0];
if ((colLen + VARSTR_HEADER_SIZE) > pTagSchema->bytes) {
code = buildInvalidOperationMsg(&pBuf, "tag length is too big");
@ -173,6 +177,10 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch
isJson = true;
char* tmp = taosMemoryCalloc(1, colLen + 1);
if (!tmp) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
memcpy(tmp, bind[c].buffer, colLen);
code = parseJsontoTagData(tmp, pTagArray, &pTag, &pBuf);
taosMemoryFree(tmp);
@ -495,6 +503,10 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c
SSchema* pTagSchema = &pSchema[tags->pColIndex[c]];
int32_t colLen = pTagSchema->bytes;
if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
if (!bind[c].length) {
code = buildInvalidOperationMsg(&pBuf, "var tag length is null");
goto end;
}
colLen = bind[c].length[0];
if ((colLen + VARSTR_HEADER_SIZE) > pTagSchema->bytes) {
code = buildInvalidOperationMsg(&pBuf, "tag length is too big");
@ -513,6 +525,10 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c
isJson = true;
char* tmp = taosMemoryCalloc(1, colLen + 1);
if (!tmp) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
memcpy(tmp, bind[c].buffer, colLen);
code = parseJsontoTagData(tmp, pTagArray, &pTag, &pBuf);
taosMemoryFree(tmp);

View File

@ -146,6 +146,10 @@ void syncStop(int64_t rid) {
void syncPreStop(int64_t rid) {
SSyncNode* pSyncNode = syncNodeAcquire(rid);
if (pSyncNode != NULL) {
if (snapshotReceiverIsStart(pSyncNode->pNewNodeReceiver)) {
sInfo("vgId:%d, stop snapshot receiver", pSyncNode->vgId);
snapshotReceiverStop(pSyncNode->pNewNodeReceiver);
}
syncNodePreClose(pSyncNode);
syncNodeRelease(pSyncNode);
}

View File

@ -596,7 +596,9 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
if (sIdx + i < TDB_PAGE_TOTAL_CELLS(pParent)) {
pCell = tdbPageGetCell(pParent, sIdx + i);
szDivCell[i] = tdbBtreeCellSize(pParent, pCell, 0, NULL, NULL);
pDivCell[i] = tdbOsMalloc(szDivCell[i]);
if ((pDivCell[i] = tdbOsMalloc(szDivCell[i])) == NULL) {
return terrno;
}
memcpy(pDivCell[i], pCell, szDivCell[i]);
}
@ -855,6 +857,9 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
// TODO: pCell here may be inserted as an overflow cell, handle it
SCell *pNewCell = tdbOsMalloc(cd.kLen + 9);
if (pNewCell == NULL) {
return terrno;
}
int szNewCell;
SPgno pgno;
pgno = TDB_PAGE_PGNO(pNews[iNew]);
@ -2273,7 +2278,10 @@ int tdbBtcDelete(SBTC *pBtc) {
}
// update the cell with new key
pCell = tdbOsMalloc(nKey + 9);
if ((pCell = tdbOsMalloc(nKey + 9)) == NULL) {
tdbError("tdb/btc-delete: malloc failed.");
return terrno;
}
(void)tdbBtreeEncodeCell(pPage, pKey, nKey, &pgno, sizeof(pgno), pCell, &szCell, pBtc->pTxn, pBtc->pBt);
ret = tdbPageUpdateCell(pPage, idx, pCell, szCell, pBtc->pTxn, pBtc->pBt);

View File

@ -185,6 +185,10 @@ int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl
// TODO: here has memory leak
pNewCell = (SCell *)tdbOsMalloc(szCell);
if (pNewCell == NULL) {
tdbError("tdb/page-insert-cell: malloc failed.");
return terrno;
}
memcpy(pNewCell, pCell, szCell);
tdbTrace("tdbPage/insert/new ovfl cell: %p/%p", pNewCell, pPage);

View File

@ -353,7 +353,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/splitVGroupWal.py -N 3 -n 3
,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/delete_check.py
#,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/test_hot_refresh_configurations.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/subscribe_stream_privilege.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/empty_identifier.py

View File

@ -13,6 +13,7 @@ from util.common import *
class TDTestCase:
"""This test case is used to veirfy hot refresh configurations
"""
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
@ -20,11 +21,11 @@ class TDTestCase:
self.configration_dic = {
"cli": [
{
"name": "keepAliveIdle",
"values": [1, 100, 7200000],
"except_values": [0, 7200001]
},
# {
# "name": "keepAliveIdle",
# "values": [1, 100, 7200000],
# "except_values": [0, 7200001]
# },
{
"name": "queryPolicy",
"values": [1, 2, 4],
@ -42,12 +43,12 @@ class TDTestCase:
}
],
"svr": [
{
"name": "keepAliveIdle",
"alias": "tsKeepAliveIdle",
"values": [1, 100, 7200000],
"except_values": [0, 7200001]
},
# {
# "name": "keepAliveIdle",
# "alias": "tsKeepAliveIdle",
# "values": [1, 100, 7200000],
# "except_values": [0, 7200001]
# },
{
"name": "mndSdbWriteDelta",
"alias": "tsMndSdbWriteDelta",
@ -100,6 +101,7 @@ class TDTestCase:
"name": "minDiskFreeSize",
"alias": "tsMinDiskFreeSize",
"values": ["51200K", "100M", "1G"],
"check_values": ["52428800", "104857600", "1073741824"],
"except_values": ["1024K", "1.1G", "1T"]
},
{
@ -159,27 +161,52 @@ class TDTestCase:
]
}
def get_param_value(self, config_name):
def cli_get_param_value(self, config_name):
tdSql.query("show local variables;")
for row in tdSql.queryResult:
if config_name == row[0]:
tdLog.debug("Found variable '{}'".format(row[0]))
return row[1]
def cli_check(self, name, values, except_values=False):
def svr_get_param_value(self, config_name):
tdSql.query("show dnode 1 variables;")
for row in tdSql.queryResult:
if config_name == row[1]:
tdLog.debug("Found variable '{}'".format(row[1]))
return row[2]
def cli_check(self, item, except_values=False):
name = item["name"]
if except_values:
values = item["except_values"]
else:
values = item["values"]
check_values = item.get("check_values", [])
if not except_values:
for v in values:
for i in range(len(values)):
v = values[i]
tdLog.debug("Set {} to {}".format(name, v))
tdSql.execute(f'alter local "{name} {v}";')
value = self.get_param_value(name)
value = self.cli_get_param_value(name)
tdLog.debug("Get {} value: {}".format(name, value))
assert(v == int(value))
if check_values:
tdLog.debug(f"assert {check_values[i]} == {str(value)}")
assert str(check_values[i]) == str(value)
else:
tdLog.debug(f"assert {v} == {str(value)}")
assert str(v) == str(value)
else:
for v in values:
tdLog.debug("Set {} to {}".format(name, v))
tdSql.error(f'alter local "{name} {v}";')
def svr_check(self, name, alias, values, except_values=False):
def svr_check(self, item, except_values=False):
name = item["name"]
if except_values:
values = item["except_values"]
else:
values = item["values"]
check_values = item.get("check_values", [])
p_list = ["dnode 1", "all dnodes"]
# check bool param value
if len(values) == 2 and [0, 1] == values and name != "queryRspPolicy":
@ -188,13 +215,18 @@ class TDTestCase:
is_bool = False
tdLog.debug(f"{name} is_bool: {is_bool}")
if not except_values:
for v in values:
for i in range(len(values)):
v = values[i]
dnode = random.choice(p_list)
tdSql.execute(f'alter {dnode} "{name} {v}";')
value = self.get_param_value(alias)
if value:
value = self.svr_get_param_value(name)
tdLog.debug(f"value: {value}")
assert(value == str(bool(v)).lower() if is_bool else str(v))
if check_values:
tdLog.debug(f"assert {check_values[i]} == {str(value)}")
assert str(check_values[i]) == str(value)
else:
tdLog.debug(f"assert {v} == {str(value)}")
assert str(v) == str(value)
else:
for v in values:
dnode = random.choice(p_list)
@ -205,28 +237,29 @@ class TDTestCase:
# reset log
taosdLogAbsoluteFilename = tdCom.getTaosdPath() + "/log/" + "taosdlog*"
tdSql.execute("alter all dnodes 'resetlog';")
r = subprocess.Popen("cat {} | grep 'reset log file'".format(taosdLogAbsoluteFilename), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
r = subprocess.Popen("cat {} | grep 'reset log file'".format(taosdLogAbsoluteFilename), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = r.communicate()
assert('reset log file' in stdout.decode())
assert ('reset log file' in stdout.decode())
for key in self.configration_dic:
if "cli" == key:
for item in self.configration_dic[key]:
self.cli_check(item["name"], item["values"])
self.cli_check(item)
if "except_values" in item:
self.cli_check(item["name"], item["except_values"], True)
self.cli_check(item, True)
elif "svr" == key:
for item in self.configration_dic[key]:
self.svr_check(item["name"], item["alias"], item["values"])
self.svr_check(item)
if "except_values" in item:
self.svr_check(item["name"], item["alias"], item["except_values"], True)
self.svr_check(item, True)
else:
raise Exception(f"unknown key: {key}")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())