Merge branch '3.0' into feat/TS-4229
commit 81767dfa32

@ -41,6 +41,7 @@ add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(examples/c)
add_subdirectory(tests)
include(${TD_SUPPORT_DIR}/cmake.install)

# docs
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -262,6 +262,63 @@ The following types may be returned:
- "INT UNSIGNED"
- "BIGINT UNSIGNED"
- "JSON"
- "VARBINARY"
- "GEOMETRY"

The `VARBINARY` and `GEOMETRY` types return data as a hex string. For example:

Prepare data

```bash
create database demo
use demo
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
insert into t values(now,'\x7f8290','point(100 100)')
```

Execute query

```bash
curl --location 'http://<fqdn>:<port>/rest/sql' \
--header 'Content-Type: text/plain' \
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
--data 'select * from demo.t'
```

Return results

```json
{
    "code": 0,
    "column_meta": [
        [
            "ts",
            "TIMESTAMP",
            8
        ],
        [
            "c1",
            "VARBINARY",
            20
        ],
        [
            "c2",
            "GEOMETRY",
            100
        ]
    ],
    "data": [
        [
            "2023-11-01T06:28:15.210Z",
            "7f8290",
            "010100000000000000000059400000000000005940"
        ]
    ],
    "rows": 1
}
```

- `010100000000000000000059400000000000005940` is the [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) representation of `point(100 100)`
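To make the hex-to-WKB relationship concrete, here is a small standalone C sketch (illustrative only, not part of this changeset) that decodes the returned hex string as a little-endian WKB point; the helper name is made up and a little-endian host is assumed.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Hypothetical helper: parse the REST hex string as a WKB Point:
// 1 byte byte-order flag, 4-byte geometry type (1 == Point), two 8-byte doubles.
static int wkbPointFromHex(const char *hex, double *x, double *y) {
  uint8_t buf[64];
  size_t  len = strlen(hex) / 2;
  if (len < 21 || len > sizeof(buf)) return -1;  // a WKB point is 21 bytes
  for (size_t i = 0; i < len; ++i) {
    unsigned v;
    if (sscanf(hex + 2 * i, "%2x", &v) != 1) return -1;
    buf[i] = (uint8_t)v;
  }
  if (buf[0] != 0x01) return -1;  // only the little-endian encoding is handled here
  uint32_t type;
  memcpy(&type, buf + 1, 4);
  if (type != 1) return -1;       // 1 == Point
  memcpy(x, buf + 5, 8);          // assumes a little-endian host
  memcpy(y, buf + 13, 8);
  return 0;
}

int main(void) {
  double x, y;
  if (wkbPointFromHex("010100000000000000000059400000000000005940", &x, &y) == 0) {
    printf("point(%g %g)\n", x, y);  // prints point(100 100)
  }
  return 0;
}
```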

#### Errors

@ -257,6 +257,63 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez
- "INT UNSIGNED"
- "BIGINT UNSIGNED"
- "JSON"
- "VARBINARY"
- "GEOMETRY"

The `VARBINARY` and `GEOMETRY` types return data as a hex string. For example:

Prepare data

```bash
create database demo
use demo
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
insert into t values(now,'\x7f8290','point(100 100)')
```

Execute query

```bash
curl --location 'http://<fqdn>:<port>/rest/sql' \
--header 'Content-Type: text/plain' \
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
--data 'select * from demo.t'
```

Return results

```json
{
    "code": 0,
    "column_meta": [
        [
            "ts",
            "TIMESTAMP",
            8
        ],
        [
            "c1",
            "VARBINARY",
            20
        ],
        [
            "c2",
            "GEOMETRY",
            100
        ]
    ],
    "data": [
        [
            "2023-11-01T06:28:15.210Z",
            "7f8290",
            "010100000000000000000059400000000000005940"
        ]
    ],
    "rows": 1
}
```

- `010100000000000000000059400000000000005940` is the [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) representation of `point(100 100)`

#### Errors
@ -34,7 +34,7 @@ extern int32_t tsS3UploadDelaySec;
int32_t s3Init();
void s3CleanUp();
int32_t s3PutObjectFromFile(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp);
void s3DeleteObjectsByPrefix(const char *prefix);
void s3DeleteObjects(const char *object_name[], int nobject);
bool s3Exists(const char *object_name);
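For orientation, the new `withcp` flag selects the checkpoint-based multipart path implemented later in this changeset. A minimal, hedged call sketch (the file and object names are made up):

```c
#include "cos.h"  // declares the new s3PutObjectFromFile2(file, object, withcp)

// Sketch only: upload one file and request checkpoint/resume support (withcp = 1).
static int32_t uploadWithResume(void) {
  return s3PutObjectFromFile2("/tmp/v2f1735ver3.data", "v2f1735ver3.data", 1 /* withcp */);
}
```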
@ -0,0 +1,83 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_COMMON_COS_CP_H_
#define _TD_COMMON_COS_CP_H_

#include "os.h"
#include "tdef.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
  COS_CP_TYPE_UPLOAD,   // upload
  COS_CP_TYPE_DOWNLOAD  // download
} ECpType;

typedef struct {
  int32_t  index;      // the index of part, start from 0
  int64_t  offset;     // the offset point of part
  int64_t  size;       // the size of part
  int      completed;  // COS_TRUE completed, COS_FALSE uncompleted
  char     etag[128];  // the etag of part, for upload
  uint64_t crc64;
} SCheckpointPart;

typedef struct {
  ECpType   cp_type;  // 0 upload, 1 download
  char      md5[64];  // the md5 of checkpoint content
  TdFilePtr thefile;  // the handle of checkpoint file

  char    file_path[TSDB_FILENAME_LEN];  // local file path
  int64_t file_size;                     // local file size, for upload
  int32_t file_last_modified;            // local file last modified time, for upload
  char    file_md5[64];                  // md5 of the local file content, for upload, reserved

  char    object_name[128];          // object name
  int64_t object_size;               // object size, for download
  char    object_last_modified[64];  // object last modified time, for download
  char    object_etag[128];          // object etag, for download

  char upload_id[128];  // upload id

  int              part_num;   // the total number of parts
  int64_t          part_size;  // the part size, byte
  SCheckpointPart* parts;      // the parts of local or object, from 0
} SCheckpoint;

int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint);
void cos_cp_close(TdFilePtr fd);
void cos_cp_remove(char const* filepath);

int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint);
int32_t cos_cp_dump(SCheckpoint* checkpoint);
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes);
void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64);
void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
                         char const* upload_id, int64_t part_size);
bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime);

void cos_cp_build_download(SCheckpoint* checkpoint, char const* filepath, char const* object_name, int64_t object_size,
                           char const* object_lmtime, char const* object_etag, int64_t part_size);
bool cos_cp_is_valid_download(SCheckpoint* checkpoint, char const* object_name, int64_t object_size,
                              char const* object_lmtime, char const* object_etag);

#ifdef __cplusplus
}
#endif

#endif /*_TD_COMMON_COS_CP_H_*/
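To clarify the intended call order of this checkpoint API, the sketch below condenses how `s3PutObjectFromFileWithCp()` (added later in this changeset) drives it. Error handling is omitted, the etag is a placeholder, and the upload id is simplified (in the real code it comes from `S3_initiate_multipart` when not resuming).

```c
// Condensed sketch of the upload-side checkpoint flow; not a drop-in implementation.
static int32_t resumeOrStartUpload(const char *file, int64_t size, int32_t mtime,
                                   const char *upload_id, int64_t part_size) {
  char cp_path[TSDB_FILENAME_LEN];
  snprintf(cp_path, sizeof(cp_path), "%s.cp", file);

  SCheckpoint cp = {0};
  cp.parts = taosMemoryCalloc(10000, sizeof(SCheckpointPart));

  bool resumed = false;
  if (taosCheckExistFile(cp_path)) {
    // Reuse the previous multipart upload only if the local file is unchanged.
    if (!cos_cp_load(cp_path, &cp) && cos_cp_is_valid_upload(&cp, size, mtime)) {
      resumed = true;
    } else {
      cos_cp_remove(cp_path);
    }
  }
  if (!resumed) {
    cos_cp_build_upload(&cp, file, size, mtime, upload_id, part_size);
  }

  cos_cp_open(cp_path, &cp);                // (re)open the on-disk checkpoint file
  for (int i = 0; i < cp.part_num; ++i) {
    if (cp.parts[i].completed) continue;    // skip parts finished in a previous run
    // ... upload part i and obtain its etag (placeholder below) ...
    cos_cp_update(&cp, cp.parts[i].index, "etag-placeholder", 0);
    cos_cp_dump(&cp);                       // persist progress after each part
  }
  cos_cp_close(cp.thefile);
  cos_cp_remove(cp_path);                   // all parts done: checkpoint no longer needed
  taosMemoryFree(cp.parts);
  return 0;
}
```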

@ -107,8 +107,9 @@ extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp;

// audit
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern int32_t tsAuditInterval;

// telem
extern bool tsEnableTelem;
@ -23,13 +23,13 @@
#include "tjson.h"
#include "tmsgcb.h"
#include "trpc.h"
#include "mnode.h"

#ifdef __cplusplus
extern "C" {
#endif

#define AUDIT_DETAIL_MAX 65472
#define AUDIT_OPERATION_LEN 20

typedef struct {
  const char *server;

@ -37,13 +37,28 @@ typedef struct {
  bool comp;
} SAuditCfg;

typedef struct {
  int64_t curTime;
  char    strClusterId[TSDB_CLUSTER_ID_LEN];
  char    clientAddress[50];
  char    user[TSDB_USER_LEN];
  char    operation[AUDIT_OPERATION_LEN];
  char    target1[TSDB_DB_NAME_LEN];      // put db name
  char    target2[TSDB_STREAM_NAME_LEN];  // put stb name, table name, topic name, user name, stream name, use max
  char*   detail;
} SAuditRecord;

int32_t auditInit(const SAuditCfg *pCfg);
void auditCleanup();
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
                 char *detail, int32_t len);
void auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
                    char *detail, int32_t len);
void auditSendRecordsInBatch();

#ifdef __cplusplus
}
#endif

#endif /*_TD_MONITOR_H_*/
#endif /*_TD_AUDIT_H_*/
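A brief, hedged sketch of how the batched audit API above is meant to be used; the operation, database, and table names are illustrative, and the real call sites (in the mnode) are not part of this diff.

```c
#include <string.h>
#include "audit.h"

// Sketch: queue one audit record and flush the current batch. In practice the
// batch is normally flushed on a tsAuditInterval timer rather than immediately.
static void auditCreateTableExample(SRpcMsg *pReq, int64_t clusterId) {
  char operation[AUDIT_OPERATION_LEN] = "createTable";   // illustrative operation name
  char db[TSDB_DB_NAME_LEN] = "power";                   // target1: database name
  char table[TSDB_STREAM_NAME_LEN] = "meters";           // target2: table name
  char detail[64] = "create table meters (...)";         // illustrative SQL text

  auditAddRecord(pReq, clusterId, operation, db, table, detail, (int32_t)strlen(detail));
  auditSendRecordsInBatch();
}
```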
@ -509,11 +509,8 @@ typedef struct SStreamMeta {
  SArray*  chkpSaved;
  SArray*  chkpInUse;
  SRWLatch chkpDirLock;

  void*    qHandle;
  int32_t  pauseTaskNum;

  void*    bkdChkptMgt;
  void*    qHandle;
  void*    bkdChkptMgt;
} SStreamMeta;

int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);

@ -840,11 +837,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int3
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaReopen(SStreamMeta* pMeta);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
@ -33,11 +33,12 @@ extern "C" {
#define SYNC_MAX_PROGRESS_WAIT_MS 4000
#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
#define SYNC_MAX_RECV_TIME_RANGE_MS 1200
#define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_WAIT_MS 1000 * 5

#define SYNC_WAL_LOG_RETENTION_SIZE (8LL * 1024 * 1024 * 1024)

#define SYNC_MAX_RETRY_BACKOFF 5
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100
#define SYNC_APPEND_ENTRIES_TIMEOUT_MS 10000

@ -219,6 +220,7 @@ typedef struct SSyncLogStore {

  SyncIndex (*syncLogWriteIndex)(struct SSyncLogStore* pLogStore);
  SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
  SyncIndex (*syncLogIndexRetention)(struct SSyncLogStore* pLogStore, int64_t bytes);
  SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore);

  int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync);
@ -225,6 +225,7 @@ bool walIsEmpty(SWal *);
int64_t walGetFirstVer(SWal *);
int64_t walGetSnapshotVer(SWal *);
int64_t walGetLastVer(SWal *);
int64_t walGetVerRetention(SWal *pWal, int64_t bytes);
int64_t walGetCommittedVer(SWal *);
int64_t walGetAppliedVer(SWal *);
@ -47,18 +47,13 @@ typedef int32_t TdUcs4;
#define strtof STR_TO_F_FUNC_TAOS_FORBID
#endif

#ifdef WINDOWS
#define tstrdup(str) _strdup(str)
#else
#define tstrdup(str) strdup(str)
#endif

#define tstrncpy(dst, src, size)     \
  do {                               \
    strncpy((dst), (src), (size));   \
    (dst)[(size)-1] = 0;             \
  } while (0)

char *tstrdup(const char *src);
int32_t taosUcs4len(TdUcs4 *ucs4);
int64_t taosStr2int64(const char *str);
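As a reminder of why these wrappers exist: `tstrncpy` always NUL-terminates its destination (plain `strncpy` does not), and `tstrdup` is now a function instead of a per-platform macro. An illustrative, hedged sketch:

```c
#include "os.h"

// Sketch: copy a possibly over-long string into a fixed buffer, then duplicate it.
static char *copyAndDup(const char *src) {
  char name[64];
  tstrncpy(name, src, sizeof(name));  // truncates long input, always NUL-terminated
  return tstrdup(name);               // heap copy; the caller releases it
}
```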
@ -205,7 +205,7 @@ typedef struct SRequestSendRecvBody {
  __taos_async_fn_t queryFp;
  __taos_async_fn_t fetchFp;
  EQueryExecMode    execMode;
  void*             param;
  void*             interParam;
  SDataBuf          requestMsg;
  int64_t           queryJob;  // query job, created according to sql query DAG.
  int32_t           subplanNum;

@ -287,6 +287,7 @@ typedef struct SRequestObj {
typedef struct SSyncQueryParam {
  tsem_t       sem;
  SRequestObj* pRequest;
  void*        userParam;
} SSyncQueryParam;

void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);

@ -420,6 +421,7 @@ int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj
int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
void returnToUser(SRequestObj* pRequest);
void stopAllQueries(SRequestObj *pRequest);
void doRequestCallback(SRequestObj* pRequest, int32_t code);
void freeQueryParam(SSyncQueryParam* param);

#ifdef TD_ENTERPRISE
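The key relationship introduced here: `body.interParam` always points at a client-owned `SSyncQueryParam`, and the caller's original callback argument is carried inside it as `userParam`. The new `doRequestCallback()` helper, defined later in this changeset in clientImpl.c, is exactly that indirection:

```c
#include "clientInt.h"

// Shape of the helper added later in this diff: invoke the user's query callback
// with the user's own argument, which is stored inside the internal SSyncQueryParam.
void doRequestCallback(SRequestObj* pRequest, int32_t code) {
  pRequest->body.queryFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code);
}
```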
@ -316,6 +316,15 @@ void *createRequest(uint64_t connId, int32_t type, int64_t reqid) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return NULL;
  }
  SSyncQueryParam *interParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
  if (interParam == NULL) {
    doDestroyRequest(pRequest);
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return NULL;
  }
  tsem_init(&interParam->sem, 0, 0);
  interParam->pRequest = pRequest;
  pRequest->body.interParam = interParam;

  pRequest->resType = RES_TYPE__QUERY;
  pRequest->requestId = reqid == 0 ? generateRequestId() : reqid;

@ -437,12 +446,10 @@ void doDestroyRequest(void *p) {
    deregisterRequest(pRequest);
  }

  if (pRequest->syncQuery) {
    if (pRequest->body.param) {
      tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
    }
    taosMemoryFree(pRequest->body.param);
  if (pRequest->body.interParam) {
    tsem_destroy(&((SSyncQueryParam *)pRequest->body.interParam)->sem);
  }
  taosMemoryFree(pRequest->body.interParam);

  qDestroyQuery(pRequest->pQuery);
  nodesDestroyAllocator(pRequest->allocatorRefId);
@ -196,21 +196,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
  (*pRequest)->sqlLen = sqlLen;
  (*pRequest)->validateOnly = validateSql;

  SSyncQueryParam* newpParam = NULL;
  if (param == NULL) {
    newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
    if (newpParam == NULL) {
      destroyRequest(*pRequest);
      *pRequest = NULL;
      return TSDB_CODE_OUT_OF_MEMORY;
    }

    tsem_init(&newpParam->sem, 0, 0);
    newpParam->pRequest = (*pRequest);
    param = newpParam;
  }

  (*pRequest)->body.param = param;
  ((SSyncQueryParam*)(*pRequest)->body.interParam)->userParam = param;

  STscObj* pTscObj = (*pRequest)->pTscObj;
  int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,

@ -218,7 +204,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
  if (err) {
    tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
             (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
    freeQueryParam(newpParam);
    destroyRequest(*pRequest);
    *pRequest = NULL;
    return TSDB_CODE_OUT_OF_MEMORY;

@ -230,7 +215,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
      nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
    tscError("%" PRId64 " failed to create node allocator, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
             (*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
    freeQueryParam(newpParam);
    destroyRequest(*pRequest);
    *pRequest = NULL;
    return TSDB_CODE_OUT_OF_MEMORY;

@ -336,7 +320,7 @@ static SAppInstInfo* getAppInfo(SRequestObj* pRequest) { return pRequest->pTscOb
void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
  SRetrieveTableRsp* pRsp = NULL;
  if (pRequest->validateOnly) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
    doRequestCallback(pRequest, 0);
    return;
  }

@ -358,18 +342,18 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
             pRequest->requestId);
  }

  pRequest->body.queryFp(pRequest->body.param, pRequest, code);
  doRequestCallback(pRequest, code);
}

int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
  if (pRequest->validateOnly) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
    doRequestCallback(pRequest, 0);
    return TSDB_CODE_SUCCESS;
  }

  // drop table if exists not_exists_table
  if (NULL == pQuery->pCmdMsg) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
    doRequestCallback(pRequest, 0);
    return TSDB_CODE_SUCCESS;
  }

@ -384,7 +368,7 @@ int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
  int64_t transporterId = 0;
  int32_t code = asyncSendMsgToServer(pAppInfo->pTransporter, &pMsgInfo->epSet, &transporterId, pSendMsg);
  if (code) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }
  return code;
}

@ -913,7 +897,7 @@ void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
void returnToUser(SRequestObj* pRequest) {
  if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) {
    // return to client
    pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
    doRequestCallback(pRequest, pRequest->code);
    return;
  }

@ -921,7 +905,7 @@ void returnToUser(SRequestObj* pRequest) {
  if (pUserReq) {
    pUserReq->code = pRequest->code;
    // return to client
    pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code);
    doRequestCallback(pUserReq, pUserReq->code);
    releaseRequest(pRequest->relation.userRefId);
    return;
  } else {

@ -1031,7 +1015,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
    pRequest->pWrapper = NULL;

    // return to client
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }
}

@ -1186,7 +1170,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
      pRequest->code = terrno;
    }

    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }

  // todo not to be released here

@ -1199,7 +1183,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
  int32_t code = 0;

  if (pRequest->parseOnly) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
    doRequestCallback(pRequest, 0);
    return;
  }

@ -1233,11 +1217,11 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
    }
    case QUERY_EXEC_MODE_EMPTY_RESULT:
      pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
      pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
      doRequestCallback(pRequest, 0);
      break;
    default:
      tscError("0x%" PRIx64 " invalid execMode %d", pRequest->self, pQuery->execMode);
      pRequest->body.queryFp(pRequest->body.param, pRequest, -1);
      doRequestCallback(pRequest, -1);
      break;
  }
}

@ -1703,8 +1687,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
}

static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
  SSyncQueryParam* pParam = param;
  tsem_post(&pParam->sem);
  tsem_t* sem = param;
  tsem_post(sem);
}

void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {

@ -1722,10 +1706,11 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU

    // convert ucs4 to native multi-bytes string
    pResultInfo->convertUcs4 = convertUcs4;

    SSyncQueryParam* pParam = pRequest->body.param;
    taos_fetch_rows_a(pRequest, syncFetchFn, pParam);
    tsem_wait(&pParam->sem);
    tsem_t sem;
    tsem_init(&sem, 0, 0);
    taos_fetch_rows_a(pRequest, syncFetchFn, &sem);
    tsem_wait(&sem);
    tsem_destroy(&sem);
  }

  if (pResultInfo->numOfRows == 0 || pRequest->code != TSDB_CODE_SUCCESS) {

@ -2492,6 +2477,10 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
  tscDebug("taos_query start with sql:%s", sql);

  SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
  if (NULL == param) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return NULL;
  }
  tsem_init(&param->sem, 0, 0);

  taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly);

@ -2501,9 +2490,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
  if (param->pRequest != NULL) {
    param->pRequest->syncQuery = true;
    pRequest = param->pRequest;
  } else {
    taosMemoryFree(param);
  }
  taosMemoryFree(param);

  tscDebug("taos_query end with sql:%s", sql);

@ -2517,19 +2505,20 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
  }

  SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
  if (param == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return NULL;
  }
  tsem_init(&param->sem, 0, 0);

  taosAsyncQueryImplWithReqid(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly, reqid);
  tsem_wait(&param->sem);

  SRequestObj* pRequest = NULL;
  if (param->pRequest != NULL) {
    param->pRequest->syncQuery = true;
    pRequest = param->pRequest;
  } else {
    taosMemoryFree(param);
  }

  taosMemoryFree(param);
  return pRequest;
}

@ -2547,13 +2536,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
  if (code != TSDB_CODE_SUCCESS) {
    pRequest->code = code;
    taosMemoryFreeClear(pResultInfo->pData);
    pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
    pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
    return;
  }

  if (pRequest->code != TSDB_CODE_SUCCESS) {
    taosMemoryFreeClear(pResultInfo->pData);
    pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
    pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
    return;
  }

@ -2574,20 +2563,12 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
    atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
  }

  pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
  pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows);
}

void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) {
  if (pRequest->syncQuery && pRequest->body.param != param) {
    if (pRequest->body.param) {
      tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
    }
    taosMemoryFree(pRequest->body.param);
    pRequest->syncQuery = false;
  }

  pRequest->body.fetchFp = fp;
  pRequest->body.param = param;
  ((SSyncQueryParam *)pRequest->body.interParam)->userParam = param;

  SReqResultInfo* pResultInfo = &pRequest->body.resInfo;

@ -2625,6 +2606,10 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param
    schedulerFetchRows(pRequest->body.queryJob, &req);
  }

void doRequestCallback(SRequestObj* pRequest, int32_t code) {
  pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code);
}

int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) {
#ifndef TD_ENTERPRISE
  return TSDB_CODE_SUCCESS;

@ -2633,4 +2618,3 @@ int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool pa
#endif
}
@ -1095,7 +1095,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
    pRequest->pWrapper = NULL;
    terrno = code;
    pRequest->code = code;
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }
}

@ -1112,7 +1112,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
    pRequest->pWrapper = NULL;
    terrno = code;
    pRequest->code = code;
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }
}

@ -1210,7 +1210,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
    terrno = code;
    pRequest->code = code;
    tscDebug("call sync query cb with code: %s", tstrerror(code));
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
    return;
  }

@ -1242,7 +1242,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {

    terrno = code;
    pRequest->code = code;
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  }
}

@ -1545,12 +1545,12 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {

  conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);

  code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.param, NULL);
  code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.interParam, NULL);
  if (code) {
    goto _return;
  }

  SSyncQueryParam *pParam = pRequest->body.param;
  SSyncQueryParam *pParam = pRequest->body.interParam;
  tsem_wait(&pParam->sem);

_return:
@ -42,7 +42,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
  taosMemoryFree(pMsg->pEpSet);
  taosMemoryFree(pMsg->pData);
  if (pRequest->body.queryFp != NULL) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -200,7 +200,7 @@ int32_t processCreateDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
  }

  if (pRequest->body.queryFp) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -236,7 +236,8 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
    setErrno(pRequest, code);

    if (pRequest->body.queryFp != NULL) {
      pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
      doRequestCallback(pRequest, pRequest->code);

    } else {
      tsem_post(&pRequest->body.rspSem);
    }

@ -300,7 +301,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
  taosMemoryFree(pMsg->pEpSet);

  if (pRequest->body.queryFp != NULL) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
    doRequestCallback(pRequest, pRequest->code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -344,7 +345,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
      }
    }

    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -381,7 +382,7 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
  taosMemoryFree(pMsg->pEpSet);

  if (pRequest->body.queryFp != NULL) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -421,7 +422,7 @@ int32_t processAlterStbRsp(void* param, SDataBuf* pMsg, int32_t code) {
      }
    }

    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }

@ -535,7 +536,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) {
  taosMemoryFree(pMsg->pEpSet);

  if (pRequest->body.queryFp != NULL) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, code);
    doRequestCallback(pRequest, code);
  } else {
    tsem_post(&pRequest->body.rspSem);
  }
@ -1,6 +1,8 @@
#define ALLOW_FORBID_FUNC

#include "cos.h"
#include "cos_cp.h"
#include "tdef.h"

extern char tsS3Endpoint[];
extern char tsS3AccessKeyId[];

@ -86,7 +88,7 @@ typedef struct {
  char err_msg[128];
  S3Status status;
  uint64_t content_length;
  char * buf;
  char *buf;
  int64_t buf_pos;
} TS3SizeCBD;

@ -270,6 +272,7 @@ typedef struct list_parts_callback_data {
typedef struct MultipartPartData {
  put_object_callback_data put_object_data;
  int seq;
  UploadManager * manager;
  UploadManager *manager;
} MultipartPartData;

static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) {

@ -317,12 +319,23 @@ S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properti

  MultipartPartData *data = (MultipartPartData *)callbackData;
  int seq = data->seq;
  const char * etag = properties->eTag;
  const char *etag = properties->eTag;
  data->manager->etags[seq - 1] = strdup(etag);
  data->manager->next_etags_pos = seq;
  return S3StatusOK;
}

S3Status MultipartResponseProperiesCallbackWithCp(const S3ResponseProperties *properties, void *callbackData) {
  responsePropertiesCallbackNull(properties, callbackData);

  MultipartPartData *data = (MultipartPartData *)callbackData;
  int seq = data->seq;
  const char *etag = properties->eTag;
  data->manager->etags[seq - 1] = strdup(etag);
  // data->manager->next_etags_pos = seq;
  return S3StatusOK;
}

static int multipartPutXmlCallback(int bufferSize, char *buffer, void *callbackData) {
  UploadManager *manager = (UploadManager *)callbackData;
  int ret = 0;
@ -446,37 +459,343 @@ static int try_get_parts_info(const char *bucketName, const char *key, UploadMan
  return 0;
}
*/
int32_t s3PutObjectFromFile2(const char *file, const char *object) {
  int32_t code = 0;
  const char *key = object;
  // const char *uploadId = 0;
  const char * filename = 0;

static int32_t s3PutObjectFromFileSimple(S3BucketContext *bucket_context, char const *object_name, int64_t size,
                                         S3PutProperties *put_prop, put_object_callback_data *data) {
  int32_t code = 0;
  S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
                                         &putObjectDataCallback};

  do {
    S3_put_object(bucket_context, object_name, size, put_prop, 0, 0, &putObjectHandler, data);
  } while (S3_status_is_retryable(data->status) && should_retry());

  if (data->status != S3StatusOK) {
    s3PrintError(__FILE__, __LINE__, __func__, data->status, data->err_msg);
    code = TAOS_SYSTEM_ERROR(EIO);
  } else if (data->contentLength) {
    uError("%s Failed to read remaining %llu bytes from input", __func__, (unsigned long long)data->contentLength);
    code = TAOS_SYSTEM_ERROR(EIO);
  }

  return code;
}

static int32_t s3PutObjectFromFileWithoutCp(S3BucketContext *bucket_context, char const *object_name,
                                            int64_t contentLength, S3PutProperties *put_prop,
                                            put_object_callback_data *data) {
  int32_t code = 0;
  uint64_t totalContentLength = contentLength;
  uint64_t todoContentLength = contentLength;
  UploadManager manager = {0};

  uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
  int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
  const int max_part_num = 10000;
  if (totalSeq > max_part_num) {
    chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
    totalSeq = (contentLength + chunk_size - 1) / chunk_size;
  }

  MultipartPartData partData;
  memset(&partData, 0, sizeof(MultipartPartData));
  int partContentLength = 0;

  S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
                                       &initial_multipart_callback};

  S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback},
                                         &putObjectDataCallback};

  S3MultipartCommitHandler commit_handler = {
      {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0};

  manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
  manager.next_etags_pos = 0;
  do {
    S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager);
  } while (S3_status_is_retryable(manager.status) && should_retry());

  if (manager.upload_id == 0 || manager.status != S3StatusOK) {
    s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
    code = TAOS_SYSTEM_ERROR(EIO);
    goto clean;
  }

upload:
  todoContentLength -= chunk_size * manager.next_etags_pos;
  for (int seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
    partData.manager = &manager;
    partData.seq = seq;
    if (partData.put_object_data.gb == NULL) {
      partData.put_object_data = *data;
    }
    partContentLength = ((contentLength > chunk_size) ? chunk_size : contentLength);
    // printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : "Sending", seq, partContentLength);
    partData.put_object_data.contentLength = partContentLength;
    partData.put_object_data.originalContentLength = partContentLength;
    partData.put_object_data.totalContentLength = todoContentLength;
    partData.put_object_data.totalOriginalContentLength = totalContentLength;
    put_prop->md5 = 0;
    do {
      S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, manager.upload_id,
                     partContentLength, 0, timeoutMsG, &partData);
    } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
    if (partData.put_object_data.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }
    contentLength -= chunk_size;
    todoContentLength -= chunk_size;
  }

  int i;
  int size = 0;
  size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>", strlen("<CompleteMultipartUpload>"));
  char buf[256];
  int n;
  for (i = 0; i < totalSeq; i++) {
    if (!manager.etags[i]) {
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }
    n = snprintf(buf, sizeof(buf),
                 "<Part><PartNumber>%d</PartNumber>"
                 "<ETag>%s</ETag></Part>",
                 i + 1, manager.etags[i]);
    size += growbuffer_append(&(manager.gb), buf, n);
  }
  size += growbuffer_append(&(manager.gb), "</CompleteMultipartUpload>", strlen("</CompleteMultipartUpload>"));
  manager.remaining = size;

  do {
    S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0,
                                 timeoutMsG, &manager);
  } while (S3_status_is_retryable(manager.status) && should_retry());
  if (manager.status != S3StatusOK) {
    s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
    code = TAOS_SYSTEM_ERROR(EIO);
    goto clean;
  }

clean:
  if (manager.upload_id) {
    taosMemoryFree(manager.upload_id);
  }
  for (i = 0; i < manager.next_etags_pos; i++) {
    taosMemoryFree(manager.etags[i]);
  }
  growbuffer_destroy(manager.gb);
  taosMemoryFree(manager.etags);

  return code;
}

static int32_t s3PutObjectFromFileWithCp(S3BucketContext *bucket_context, const char *file, int32_t lmtime,
                                         char const *object_name, int64_t contentLength, S3PutProperties *put_prop,
                                         put_object_callback_data *data) {
  int32_t code = 0;

  uint64_t totalContentLength = contentLength;
  // uint64_t todoContentLength = contentLength;
  UploadManager manager = {0};

  uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
  int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
  const int max_part_num = 10000;
  if (totalSeq > max_part_num) {
    chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
    totalSeq = (contentLength + chunk_size - 1) / chunk_size;
  }

  bool need_init_upload = true;
  char file_cp_path[TSDB_FILENAME_LEN];
  snprintf(file_cp_path, TSDB_FILENAME_LEN, "%s.cp", file);

  SCheckpoint cp = {0};
  cp.parts = taosMemoryCalloc(max_part_num, sizeof(SCheckpointPart));

  if (taosCheckExistFile(file_cp_path)) {
    if (!cos_cp_load(file_cp_path, &cp) && cos_cp_is_valid_upload(&cp, contentLength, lmtime)) {
      manager.upload_id = strdup(cp.upload_id);
      need_init_upload = false;
    } else {
      cos_cp_remove(file_cp_path);
    }
  }

  if (need_init_upload) {
    S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
                                         &initial_multipart_callback};
    do {
      S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager);
    } while (S3_status_is_retryable(manager.status) && should_retry());

    if (manager.upload_id == 0 || manager.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }

    cos_cp_build_upload(&cp, file, contentLength, lmtime, manager.upload_id, chunk_size);
  }

  if (cos_cp_open(file_cp_path, &cp)) {
    code = TAOS_SYSTEM_ERROR(EIO);
    goto clean;
  }

  int part_num = 0;
  int64_t consume_bytes = 0;
  // SCheckpointPart *parts = taosMemoryCalloc(cp.part_num, sizeof(SCheckpointPart));
  // cos_cp_get_undo_parts(&cp, &part_num, parts, &consume_bytes);

  MultipartPartData partData;
  memset(&partData, 0, sizeof(MultipartPartData));
  int partContentLength = 0;

  S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallbackWithCp, &responseCompleteCallback},
                                         &putObjectDataCallback};

  S3MultipartCommitHandler commit_handler = {
      {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0};

  manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
  manager.next_etags_pos = 0;

upload:
  // todoContentLength -= chunk_size * manager.next_etags_pos;
  for (int i = 0; i < cp.part_num; ++i) {
    if (cp.parts[i].completed) {
      continue;
    }

    if (i > 0 && cp.parts[i - 1].completed) {
      if (taosLSeekFile(data->infileFD, cp.parts[i].offset, SEEK_SET) < 0) {
        code = TAOS_SYSTEM_ERROR(errno);
        goto clean;
      }
    }

    int seq = cp.parts[i].index + 1;

    partData.manager = &manager;
    partData.seq = seq;
    if (partData.put_object_data.gb == NULL) {
      partData.put_object_data = *data;
    }

    partContentLength = cp.parts[i].size;
    partData.put_object_data.contentLength = partContentLength;
    partData.put_object_data.originalContentLength = partContentLength;
    // partData.put_object_data.totalContentLength = todoContentLength;
    partData.put_object_data.totalOriginalContentLength = totalContentLength;
    put_prop->md5 = 0;
    do {
      S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, manager.upload_id,
                     partContentLength, 0, timeoutMsG, &partData);
    } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
    if (partData.put_object_data.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);

      //(void)cos_cp_dump(&cp);
      goto clean;
    }

    if (!manager.etags[seq - 1]) {
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }

    cos_cp_update(&cp, cp.parts[seq - 1].index, manager.etags[seq - 1], 0);
    (void)cos_cp_dump(&cp);

    contentLength -= chunk_size;
    // todoContentLength -= chunk_size;
  }

  cos_cp_close(cp.thefile);
  cp.thefile = 0;

  int size = 0;
  size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>", strlen("<CompleteMultipartUpload>"));
  char buf[256];
  int n;
  for (int i = 0; i < cp.part_num; ++i) {
    n = snprintf(buf, sizeof(buf),
                 "<Part><PartNumber>%d</PartNumber>"
                 "<ETag>%s</ETag></Part>",
                 // i + 1, manager.etags[i]);
                 cp.parts[i].index + 1, cp.parts[i].etag);
    size += growbuffer_append(&(manager.gb), buf, n);
  }
  size += growbuffer_append(&(manager.gb), "</CompleteMultipartUpload>", strlen("</CompleteMultipartUpload>"));
  manager.remaining = size;

  do {
    S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0,
                                 timeoutMsG, &manager);
  } while (S3_status_is_retryable(manager.status) && should_retry());
  if (manager.status != S3StatusOK) {
    s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
    code = TAOS_SYSTEM_ERROR(EIO);
    goto clean;
  }

  cos_cp_remove(file_cp_path);

clean:
  /*
  if (parts) {
    taosMemoryFree(parts);
  }
  */
  if (cp.thefile) {
    cos_cp_close(cp.thefile);
  }
  if (cp.parts) {
    taosMemoryFree(cp.parts);
  }

  if (manager.upload_id) {
    taosMemoryFree(manager.upload_id);
  }
  for (int i = 0; i < cp.part_num; ++i) {
    if (manager.etags[i]) {
      taosMemoryFree(manager.etags[i]);
    }
  }
  taosMemoryFree(manager.etags);
  growbuffer_destroy(manager.gb);

  return code;
}

int32_t s3PutObjectFromFile2(const char *file, const char *object_name, int8_t withcp) {
  int32_t code = 0;
  int32_t lmtime = 0;
  const char *filename = 0;
  uint64_t contentLength = 0;
  const char * cacheControl = 0, *contentType = 0, *md5 = 0;
  const char * contentDispositionFilename = 0, *contentEncoding = 0;
  const char *cacheControl = 0, *contentType = 0, *md5 = 0;
  const char *contentDispositionFilename = 0, *contentEncoding = 0;
  int64_t expires = -1;
  S3CannedAcl cannedAcl = S3CannedAclPrivate;
  int metaPropertiesCount = 0;
  S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
  char useServerSideEncryption = 0;
  put_object_callback_data data = {0};
  // int noStatus = 0;

  // data.infile = 0;
  // data.gb = 0;
  // data.infileFD = NULL;
  // data.noStatus = noStatus;

  // uError("ERROR: %s stat file %s: ", __func__, file);
  if (taosStatFile(file, &contentLength, NULL, NULL) < 0) {
    uError("ERROR: %s Failed to stat file %s: ", __func__, file);
  if (taosStatFile(file, &contentLength, &lmtime, NULL) < 0) {
    code = TAOS_SYSTEM_ERROR(errno);
    uError("ERROR: %s Failed to stat file %s: ", __func__, file);
    return code;
  }

  if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) {
    uError("ERROR: %s Failed to open file %s: ", __func__, file);
    code = TAOS_SYSTEM_ERROR(errno);
    uError("ERROR: %s Failed to open file %s: ", __func__, file);
    return code;
  }
@ -493,143 +812,13 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
                                   metaProperties, useServerSideEncryption};

  if (contentLength <= MULTIPART_CHUNK_SIZE) {
    S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
                                           &putObjectDataCallback};

    do {
      S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data);
    } while (S3_status_is_retryable(data.status) && should_retry());

    if (data.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);
    } else if (data.contentLength) {
      uError("ERROR: %s Failed to read remaining %llu bytes from input", __func__,
             (unsigned long long)data.contentLength);
      code = TAOS_SYSTEM_ERROR(EIO);
    }
    code = s3PutObjectFromFileSimple(&bucketContext, object_name, contentLength, &putProperties, &data);
  } else {
    uint64_t totalContentLength = contentLength;
    uint64_t todoContentLength = contentLength;
    UploadManager manager = {0};
    // manager.upload_id = 0;
    // manager.gb = 0;

    // div round up
    int seq;
    uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
    int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
    const int max_part_num = 10000;
    if (totalSeq > max_part_num) {
      chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
      totalSeq = (contentLength + chunk_size - 1) / chunk_size;
    if (withcp) {
      code = s3PutObjectFromFileWithCp(&bucketContext, file, lmtime, object_name, contentLength, &putProperties, &data);
    } else {
      code = s3PutObjectFromFileWithoutCp(&bucketContext, object_name, contentLength, &putProperties, &data);
    }

    MultipartPartData partData;
    memset(&partData, 0, sizeof(MultipartPartData));
    int partContentLength = 0;

    S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
                                         &initial_multipart_callback};

    S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallback, &responseCompleteCallback},
                                           &putObjectDataCallback};

    S3MultipartCommitHandler commit_handler = {
        {&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0};

    manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
    manager.next_etags_pos = 0;
    /*
    if (uploadId) {
      manager.upload_id = strdup(uploadId);
      manager.remaining = contentLength;
      if (!try_get_parts_info(tsS3BucketName, key, &manager)) {
        fseek(data.infile, -(manager.remaining), 2);
        taosLSeekFile(data.infileFD, -(manager.remaining), SEEK_END);
        contentLength = manager.remaining;
        goto upload;
      } else {
        goto clean;
      }
    }
    */
    do {
      S3_initiate_multipart(&bucketContext, key, 0, &handler, 0, timeoutMsG, &manager);
    } while (S3_status_is_retryable(manager.status) && should_retry());

    if (manager.upload_id == 0 || manager.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }

  upload:
    todoContentLength -= chunk_size * manager.next_etags_pos;
    for (seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
      partData.manager = &manager;
      partData.seq = seq;
      if (partData.put_object_data.gb == NULL) {
        partData.put_object_data = data;
      }
      partContentLength = ((contentLength > chunk_size) ? chunk_size : contentLength);
      // printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : "Sending", seq, partContentLength);
      partData.put_object_data.contentLength = partContentLength;
      partData.put_object_data.originalContentLength = partContentLength;
      partData.put_object_data.totalContentLength = todoContentLength;
      partData.put_object_data.totalOriginalContentLength = totalContentLength;
      putProperties.md5 = 0;
      do {
        S3_upload_part(&bucketContext, key, &putProperties, &putObjectHandler, seq, manager.upload_id,
                       partContentLength, 0, timeoutMsG, &partData);
      } while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
      if (partData.put_object_data.status != S3StatusOK) {
        s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg);
        code = TAOS_SYSTEM_ERROR(EIO);
        goto clean;
      }
      contentLength -= chunk_size;
      todoContentLength -= chunk_size;
    }

    int i;
    int size = 0;
    size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>", strlen("<CompleteMultipartUpload>"));
    char buf[256];
    int n;
    for (i = 0; i < totalSeq; i++) {
      if (!manager.etags[i]) {
        code = TAOS_SYSTEM_ERROR(EIO);
        goto clean;
      }
      n = snprintf(buf, sizeof(buf),
                   "<Part><PartNumber>%d</PartNumber>"
                   "<ETag>%s</ETag></Part>",
                   i + 1, manager.etags[i]);
      size += growbuffer_append(&(manager.gb), buf, n);
    }
    size += growbuffer_append(&(manager.gb), "</CompleteMultipartUpload>", strlen("</CompleteMultipartUpload>"));
    manager.remaining = size;

    do {
      S3_complete_multipart_upload(&bucketContext, key, &commit_handler, manager.upload_id, manager.remaining, 0,
                                   timeoutMsG, &manager);
    } while (S3_status_is_retryable(manager.status) && should_retry());
    if (manager.status != S3StatusOK) {
      s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
      code = TAOS_SYSTEM_ERROR(EIO);
      goto clean;
    }

  clean:
    if (manager.upload_id) {
      taosMemoryFree(manager.upload_id);
    }
    for (i = 0; i < manager.next_etags_pos; i++) {
      taosMemoryFree(manager.etags[i]);
    }
    growbuffer_destroy(manager.gb);
    taosMemoryFree(manager.etags);
  }

  if (data.infileFD) {
@ -648,7 +837,7 @@ typedef struct list_bucket_callback_data {
|
|||
char nextMarker[1024];
|
||||
int keyCount;
|
||||
int allDetails;
|
||||
SArray * objectArray;
|
||||
SArray *objectArray;
|
||||
} list_bucket_callback_data;
|
||||
|
||||
static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount,
|
||||
|
@ -693,11 +882,11 @@ static void s3FreeObjectKey(void *pItem) {
|
|||
|
||||
static SArray *getListByPrefix(const char *prefix) {
|
||||
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
|
||||
0, awsRegionG};
|
||||
0, awsRegionG};
|
||||
S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
|
||||
&listBucketCallback};
|
||||
|
||||
const char * marker = 0, *delimiter = 0;
|
||||
const char *marker = 0, *delimiter = 0;
|
||||
int maxkeys = 0, allDetails = 0;
|
||||
list_bucket_callback_data data;
|
||||
data.objectArray = taosArrayInit(32, sizeof(void *));
|
||||
|
@ -738,7 +927,7 @@ static SArray *getListByPrefix(const char *prefix) {
|
|||
|
||||
void s3DeleteObjects(const char *object_name[], int nobject) {
|
||||
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
|
||||
0, awsRegionG};
|
||||
0, awsRegionG};
|
||||
S3ResponseHandler responseHandler = {0, &responseCompleteCallback};
|
||||
|
||||
for (int i = 0; i < nobject; ++i) {
|
||||
|
@ -789,7 +978,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
|
|||
const char *ifMatch = 0, *ifNotMatch = 0;
|
||||
|
||||
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
|
||||
0, awsRegionG};
|
||||
0, awsRegionG};
|
||||
S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch};
|
||||
S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallback, &responseCompleteCallback},
|
||||
&getObjectDataCallback};
|
||||
|
@ -827,7 +1016,7 @@ int32_t s3GetObjectToFile(const char *object_name, char *fileName) {
|
|||
const char *ifMatch = 0, *ifNotMatch = 0;
|
||||
|
||||
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
|
||||
0, awsRegionG};
|
||||
0, awsRegionG};
|
||||
S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch};
|
||||
S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
|
||||
&getObjectCallback};
|
||||
|
@ -858,7 +1047,7 @@ int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) {
|
|||
if (objectArray == NULL) return -1;
|
||||
|
||||
for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) {
|
||||
char * object = taosArrayGetP(objectArray, i);
|
||||
char *object = taosArrayGetP(objectArray, i);
|
||||
const char *tmp = strchr(object, '/');
|
||||
tmp = (tmp == NULL) ? object : tmp + 1;
|
||||
char fileName[PATH_MAX] = {0};
|
||||
|
@ -949,12 +1138,12 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {

int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
  int32_t code = 0;
  cos_pool_t * p = NULL;
  cos_pool_t *p = NULL;
  int is_cname = 0;
  cos_status_t * s = NULL;
  cos_status_t *s = NULL;
  cos_request_options_t *options = NULL;
  cos_string_t bucket, object, file;
  cos_table_t * resp_headers;
  cos_table_t *resp_headers;
  // int traffic_limit = 0;

  cos_pool_create(&p, NULL);
@ -983,18 +1172,19 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
  return code;
}

int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str, int8_t withcp) {
  int32_t code = 0;
  cos_pool_t * p = NULL;
  cos_pool_t *p = NULL;
  int is_cname = 0;
  cos_status_t * s = NULL;
  cos_request_options_t * options = NULL;
  cos_status_t *s = NULL;
  cos_request_options_t *options = NULL;
  cos_string_t bucket, object, file;
  cos_table_t * resp_headers;
  cos_table_t *resp_headers;
  int traffic_limit = 0;
  cos_table_t * headers = NULL;
  cos_table_t *headers = NULL;
  cos_resumable_clt_params_t *clt_params = NULL;

  (void)withcp;
  cos_pool_create(&p, NULL);
  options = cos_request_options_create(p);
  s3InitRequestOptions(options, is_cname);
@ -1025,11 +1215,11 @@ int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
|
|||
}
|
||||
|
||||
void s3DeleteObjectsByPrefix(const char *prefix_str) {
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
int is_cname = 0;
|
||||
cos_string_t bucket;
|
||||
cos_status_t * s = NULL;
|
||||
cos_status_t *s = NULL;
|
||||
cos_string_t prefix;
|
||||
|
||||
cos_pool_create(&p, NULL);
|
||||
|
@ -1044,10 +1234,10 @@ void s3DeleteObjectsByPrefix(const char *prefix_str) {
|
|||
}
|
||||
|
||||
void s3DeleteObjects(const char *object_name[], int nobject) {
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
int is_cname = 0;
|
||||
cos_string_t bucket;
|
||||
cos_table_t * resp_headers = NULL;
|
||||
cos_table_t *resp_headers = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
cos_list_t object_list;
|
||||
cos_list_t deleted_object_list;
|
||||
|
@ -1081,14 +1271,14 @@ void s3DeleteObjects(const char *object_name[], int nobject) {
|
|||
|
||||
bool s3Exists(const char *object_name) {
|
||||
bool ret = false;
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
int is_cname = 0;
|
||||
cos_status_t * s = NULL;
|
||||
cos_request_options_t * options = NULL;
|
||||
cos_status_t *s = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
cos_string_t bucket;
|
||||
cos_string_t object;
|
||||
cos_table_t * resp_headers;
|
||||
cos_table_t * headers = NULL;
|
||||
cos_table_t *resp_headers;
|
||||
cos_table_t *headers = NULL;
|
||||
cos_object_exist_status_e object_exist;
|
||||
|
||||
cos_pool_create(&p, NULL);
|
||||
|
@ -1115,15 +1305,15 @@ bool s3Exists(const char *object_name) {
|
|||
|
||||
bool s3Get(const char *object_name, const char *path) {
|
||||
bool ret = false;
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
int is_cname = 0;
|
||||
cos_status_t * s = NULL;
|
||||
cos_status_t *s = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
cos_string_t bucket;
|
||||
cos_string_t object;
|
||||
cos_string_t file;
|
||||
cos_table_t * resp_headers = NULL;
|
||||
cos_table_t * headers = NULL;
|
||||
cos_table_t *resp_headers = NULL;
|
||||
cos_table_t *headers = NULL;
|
||||
int traffic_limit = 0;
|
||||
|
||||
//创建内存池
|
||||
|
@ -1159,15 +1349,15 @@ bool s3Get(const char *object_name, const char *path) {
|
|||
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) {
|
||||
(void)check;
|
||||
int32_t code = 0;
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
int is_cname = 0;
|
||||
cos_status_t * s = NULL;
|
||||
cos_status_t *s = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
cos_string_t bucket;
|
||||
cos_string_t object;
|
||||
cos_table_t * resp_headers;
|
||||
cos_table_t * headers = NULL;
|
||||
cos_buf_t * content = NULL;
|
||||
cos_table_t *resp_headers;
|
||||
cos_table_t *headers = NULL;
|
||||
cos_buf_t *content = NULL;
|
||||
// cos_string_t file;
|
||||
// int traffic_limit = 0;
|
||||
char range_buf[64];
|
||||
|
@ -1261,7 +1451,7 @@ void s3EvictCache(const char *path, long object_size) {
|
|||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
vError("failed to open %s since %s", dir_name, terrstr());
|
||||
}
|
||||
SArray * evict_files = taosArrayInit(16, sizeof(SEvictFile));
|
||||
SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile));
|
||||
tdbDirEntryPtr pDirEntry;
|
||||
while ((pDirEntry = taosReadDir(pDir)) != NULL) {
|
||||
char *name = taosGetDirEntryName(pDirEntry);
|
||||
|
@ -1303,13 +1493,13 @@ void s3EvictCache(const char *path, long object_size) {
|
|||
long s3Size(const char *object_name) {
|
||||
long size = 0;
|
||||
|
||||
cos_pool_t * p = NULL;
|
||||
cos_pool_t *p = NULL;
|
||||
int is_cname = 0;
|
||||
cos_status_t * s = NULL;
|
||||
cos_status_t *s = NULL;
|
||||
cos_request_options_t *options = NULL;
|
||||
cos_string_t bucket;
|
||||
cos_string_t object;
|
||||
cos_table_t * resp_headers = NULL;
|
||||
cos_table_t *resp_headers = NULL;
|
||||
|
||||
//创建内存池
|
||||
cos_pool_create(&p, NULL);
|
||||
|
@ -1344,7 +1534,7 @@ long s3Size(const char *object_name) {
int32_t s3Init() { return 0; }
void s3CleanUp() {}
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }
void s3DeleteObjectsByPrefix(const char *prefix) {}
void s3DeleteObjects(const char *object_name[], int nobject) {}
bool s3Exists(const char *object_name) { return false; }
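The commit threads a new `withcp` flag through both backends (the COS path currently ignores it via `(void)withcp;`). A minimal caller-side sketch, assuming `withcp = 1` requests the checkpoint/resume behavior; the header name, file path, and object key below are illustrative and not taken from the commit:

```c
#include "cos.h"  // header name assumed

// Hypothetical helper: upload a data file and opt into checkpoint-based resume.
static int32_t uploadWithResume(const char *path, const char *objectKey) {
  int32_t code = s3PutObjectFromFile2(path, objectKey, 1 /* withcp */);
  if (code != 0) {
    // on failure, the sidecar checkpoint (if one was written) lets a retry
    // skip parts that already reached object storage
    return code;
  }
  return 0;
}
```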
@ -0,0 +1,417 @@
#define ALLOW_FORBID_FUNC

#include "cos_cp.h"
#include "cJSON.h"
#include "tutil.h"

int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint) {
  int32_t code = 0;

  TdFilePtr fd = taosOpenFile(cp_path, TD_FILE_WRITE | TD_FILE_CREATE /* | TD_FILE_TRUNC*/ | TD_FILE_WRITE_THROUGH);
  if (!fd) {
    code = TAOS_SYSTEM_ERROR(errno);
    uError("ERROR: %s Failed to open %s", __func__, cp_path);
    return code;
  }

  checkpoint->thefile = fd;

  return code;
}

void cos_cp_close(TdFilePtr fd) { taosCloseFile(&fd); }
void cos_cp_remove(char const* filepath) { taosRemoveFile(filepath); }

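A minimal sketch of how a caller might drive this checkpoint file, assuming the sidecar path is derived by appending a hypothetical ".cp" suffix (the suffix and helper name are not from the commit):

```c
#include <stdio.h>
#include "cos_cp.h"

// Hypothetical lifecycle: open the sidecar, persist progress, then discard it.
static int32_t demoCheckpointLifecycle(const char *dataFile) {
  char cpPath[TSDB_FILENAME_LEN] = {0};
  snprintf(cpPath, sizeof(cpPath), "%s.cp", dataFile);  // assumed naming scheme

  SCheckpoint cp = {0};
  int32_t     code = cos_cp_open(cpPath, &cp);
  if (code != 0) return code;

  // ... fill the checkpoint (cos_cp_build_upload) and call cos_cp_dump(&cp)
  //     after each uploaded part ...

  cos_cp_close(cp.thefile);  // release the file handle
  cos_cp_remove(cpPath);     // the upload finished, so the sidecar can go
  return 0;
}
```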
static int32_t cos_cp_parse_body(char* cp_body, SCheckpoint* cp) {
|
||||
int32_t code = 0;
|
||||
cJSON const* item2 = NULL;
|
||||
|
||||
cJSON* json = cJSON_Parse(cp_body);
|
||||
if (NULL == json) {
|
||||
code = TSDB_CODE_FILE_CORRUPTED;
|
||||
uError("ERROR: %s Failed to parse json", __func__);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
cJSON const* item = cJSON_GetObjectItem(json, "ver");
|
||||
if (!cJSON_IsNumber(item) || item->valuedouble != 1) {
|
||||
code = TSDB_CODE_FILE_CORRUPTED;
|
||||
uError("ERROR: %s Failed to parse json ver: %f", __func__, item->valuedouble);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(json, "type");
|
||||
if (!cJSON_IsNumber(item)) {
|
||||
code = TSDB_CODE_FILE_CORRUPTED;
|
||||
uError("ERROR: %s Failed to parse json", __func__);
|
||||
goto _exit;
|
||||
}
|
||||
cp->cp_type = item->valuedouble;
|
||||
|
||||
item = cJSON_GetObjectItem(json, "md5");
|
||||
if (cJSON_IsString(item)) {
|
||||
memcpy(cp->md5, item->valuestring, strlen(item->valuestring));
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(json, "upload_id");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->upload_id, item->valuestring, 128);
|
||||
}
|
||||
|
||||
item2 = cJSON_GetObjectItem(json, "file");
|
||||
if (cJSON_IsObject(item2)) {
|
||||
item = cJSON_GetObjectItem(item2, "size");
|
||||
if (cJSON_IsNumber(item)) {
|
||||
cp->file_size = item->valuedouble;
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "lastmodified");
|
||||
if (cJSON_IsNumber(item)) {
|
||||
cp->file_last_modified = item->valuedouble;
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "path");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->file_path, item->valuestring, TSDB_FILENAME_LEN);
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "file_md5");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->file_md5, item->valuestring, 64);
|
||||
}
|
||||
}
|
||||
|
||||
item2 = cJSON_GetObjectItem(json, "object");
|
||||
if (cJSON_IsObject(item2)) {
|
||||
item = cJSON_GetObjectItem(item2, "object_size");
|
||||
if (cJSON_IsNumber(item)) {
|
||||
cp->object_size = item->valuedouble;
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "object_name");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->object_name, item->valuestring, 128);
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "object_last_modified");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->object_last_modified, item->valuestring, 64);
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "object_etag");
|
||||
if (cJSON_IsString(item)) {
|
||||
strncpy(cp->object_etag, item->valuestring, 128);
|
||||
}
|
||||
}
|
||||
|
||||
item2 = cJSON_GetObjectItem(json, "cpparts");
|
||||
if (cJSON_IsObject(item2)) {
|
||||
item = cJSON_GetObjectItem(item2, "number");
|
||||
if (cJSON_IsNumber(item)) {
|
||||
cp->part_num = item->valuedouble;
|
||||
}
|
||||
|
||||
item = cJSON_GetObjectItem(item2, "size");
|
||||
if (cJSON_IsNumber(item)) {
|
||||
cp->part_size = item->valuedouble;
|
||||
}
|
||||
|
||||
item2 = cJSON_GetObjectItem(item2, "parts");
|
||||
if (cJSON_IsArray(item2) && cp->part_num > 0) {
|
||||
cJSON_ArrayForEach(item, item2) {
|
||||
cJSON const* item3 = cJSON_GetObjectItem(item, "index");
|
||||
int32_t index = 0;
|
||||
if (cJSON_IsNumber(item3)) {
|
||||
index = item3->valuedouble;
|
||||
cp->parts[index].index = index;
|
||||
}
|
||||
|
||||
item3 = cJSON_GetObjectItem(item, "offset");
|
||||
if (cJSON_IsNumber(item3)) {
|
||||
cp->parts[index].offset = item3->valuedouble;
|
||||
}
|
||||
|
||||
item3 = cJSON_GetObjectItem(item, "size");
|
||||
if (cJSON_IsNumber(item3)) {
|
||||
cp->parts[index].size = item3->valuedouble;
|
||||
}
|
||||
|
||||
item3 = cJSON_GetObjectItem(item, "completed");
|
||||
if (cJSON_IsNumber(item3)) {
|
||||
cp->parts[index].completed = item3->valuedouble;
|
||||
}
|
||||
|
||||
item3 = cJSON_GetObjectItem(item, "crc64");
|
||||
if (cJSON_IsNumber(item3)) {
|
||||
cp->parts[index].crc64 = item3->valuedouble;
|
||||
}
|
||||
|
||||
item3 = cJSON_GetObjectItem(item, "etag");
|
||||
if (cJSON_IsString(item3)) {
|
||||
strncpy(cp->parts[index].etag, item3->valuestring, 128);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_exit:
|
||||
if (json) cJSON_Delete(json);
|
||||
if (cp_body) taosMemoryFree(cp_body);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint) {
|
||||
int32_t code = 0;
|
||||
|
||||
TdFilePtr fd = taosOpenFile(filepath, TD_FILE_READ);
|
||||
if (!fd) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("ERROR: %s Failed to open %s", __func__, filepath);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
int64_t size = -1;
|
||||
code = taosStatFile(filepath, &size, NULL, NULL);
|
||||
if (code) {
|
||||
uError("ERROR: %s Failed to stat %s", __func__, filepath);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
char* cp_body = taosMemoryMalloc(size + 1);
|
||||
|
||||
int64_t n = taosReadFile(fd, cp_body, size);
|
||||
if (n < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
uError("ERROR: %s Failed to read %s", __func__, filepath);
|
||||
goto _exit;
|
||||
} else if (n != size) {
|
||||
code = TSDB_CODE_FILE_CORRUPTED;
|
||||
uError("ERROR: %s Failed to read %s %" PRId64 "/%" PRId64, __func__, filepath, n, size);
|
||||
goto _exit;
|
||||
}
|
||||
taosCloseFile(&fd);
|
||||
cp_body[size] = '\0';
|
||||
|
||||
return cos_cp_parse_body(cp_body, checkpoint);
|
||||
|
||||
_exit:
|
||||
if (fd) {
|
||||
taosCloseFile(&fd);
|
||||
}
|
||||
if (cp_body) {
|
||||
taosMemoryFree(cp_body);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t cos_cp_save_json(cJSON const* json, SCheckpoint* checkpoint) {
|
||||
int32_t code = 0;
|
||||
|
||||
char* data = cJSON_PrintUnformatted(json);
|
||||
if (NULL == data) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
TdFilePtr fp = checkpoint->thefile;
|
||||
if (taosFtruncateFile(fp, 0) < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _exit;
|
||||
}
|
||||
if (taosLSeekFile(fp, 0, SEEK_SET) < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _exit;
|
||||
}
|
||||
if (taosWriteFile(fp, data, strlen(data)) < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (taosFsyncFile(fp) < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
_exit:
|
||||
taosMemoryFree(data);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t cos_cp_dump(SCheckpoint* cp) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
|
||||
cJSON* ojson = NULL;
|
||||
cJSON* json = cJSON_CreateObject();
|
||||
if (!json) return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
||||
if (NULL == cJSON_AddNumberToObject(json, "ver", 1)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
if (NULL == cJSON_AddNumberToObject(json, "type", cp->cp_type)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
if (NULL == cJSON_AddStringToObject(json, "md5", cp->md5)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
if (NULL == cJSON_AddStringToObject(json, "upload_id", cp->upload_id)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
if (COS_CP_TYPE_UPLOAD == cp->cp_type) {
|
||||
ojson = cJSON_AddObjectToObject(json, "file");
|
||||
if (!ojson) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->file_size)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(ojson, "lastmodified", cp->file_last_modified)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(ojson, "path", cp->file_path)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(ojson, "file_md5", cp->file_md5)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
} else if (COS_CP_TYPE_DOWNLOAD == cp->cp_type) {
|
||||
ojson = cJSON_AddObjectToObject(json, "object");
|
||||
if (!ojson) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(ojson, "object_size", cp->object_size)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(ojson, "object_name", cp->object_name)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(ojson, "object_last_modified", cp->object_last_modified)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(ojson, "object_etag", cp->object_etag)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
}
|
||||
|
||||
ojson = cJSON_AddObjectToObject(json, "cpparts");
|
||||
if (!ojson) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(ojson, "number", cp->part_num)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->part_size)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
|
||||
cJSON* ajson = cJSON_AddArrayToObject(ojson, "parts");
|
||||
if (!ajson) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
for (int i = 0; i < cp->part_num; ++i) {
|
||||
cJSON* item = cJSON_CreateObject();
|
||||
if (!item) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
cJSON_AddItemToArray(ajson, item);
|
||||
|
||||
if (NULL == cJSON_AddNumberToObject(item, "index", cp->parts[i].index)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(item, "offset", cp->parts[i].offset)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(item, "size", cp->parts[i].size)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(item, "completed", cp->parts[i].completed)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddNumberToObject(item, "crc64", cp->parts[i].crc64)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
if (NULL == cJSON_AddStringToObject(item, "etag", cp->parts[i].etag)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
}
|
||||
|
||||
code = cos_cp_save_json(json, cp);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
_exit:
|
||||
if (code) {
|
||||
uError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
cJSON_Delete(json);
|
||||
return code;
|
||||
}
|
||||
|
||||
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes) {}

void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64) {
  checkpoint->parts[part_index].completed = 1;
  strncpy(checkpoint->parts[part_index].etag, etag, 128);
  checkpoint->parts[part_index].crc64 = crc64;
}

void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
                         char const* upload_id, int64_t part_size) {
  int i = 0;

  checkpoint->cp_type = COS_CP_TYPE_UPLOAD;
  strncpy(checkpoint->file_path, filepath, TSDB_FILENAME_LEN);

  checkpoint->file_size = size;
  checkpoint->file_last_modified = mtime;
  strncpy(checkpoint->upload_id, upload_id, 128);

  checkpoint->part_size = part_size;
  for (; i * part_size < size; i++) {
    checkpoint->parts[i].index = i;
    checkpoint->parts[i].offset = i * part_size;
    checkpoint->parts[i].size = TMIN(part_size, (size - i * part_size));
    checkpoint->parts[i].completed = 0;
    checkpoint->parts[i].etag[0] = '\0';
  }
  checkpoint->part_num = i;
}

static bool cos_cp_verify_md5(SCheckpoint* cp) { return true; }

bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime) {
  if (cos_cp_verify_md5(checkpoint) && checkpoint->file_size == size && checkpoint->file_last_modified == mtime) {
    return true;
  }

  return false;
}

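A standalone illustration of the split loop in `cos_cp_build_upload`, using made-up sizes (a 10 MiB file and 4 MiB parts) to show how the tail part is clamped:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

// Prints the same part layout that cos_cp_build_upload would record in
// SCheckpoint.parts[] for these (illustrative) numbers.
int main(void) {
  int64_t size = 10 * 1024 * 1024, part_size = 4 * 1024 * 1024;
  int     i = 0;
  for (; i * part_size < size; i++) {
    int64_t offset = i * part_size;
    int64_t len = (part_size < size - offset) ? part_size : (size - offset);  // TMIN
    printf("part %d: offset %" PRId64 ", size %" PRId64 "\n", i, offset, len);
  }
  printf("part_num = %d\n", i);  // 3: two full 4 MiB parts plus a 2 MiB tail
  return 0;
}
```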
@ -95,8 +95,9 @@ int32_t tsMonitorMaxLogs = 100;
bool tsMonitorComp = false;

// audit
bool tsEnableAudit = true;
bool tsEnableAuditCreateTable = true;
bool    tsEnableAudit = true;
bool    tsEnableAuditCreateTable = true;
int32_t tsAuditInterval = 500;

// telem
#ifdef TD_ENTERPRISE
@ -687,6 +688,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {

  if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
  if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
  if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
    return -1;

  if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1;
  if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER) != 0) return -1;
@ -1141,6 +1144,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {

  tsEnableAudit = cfgGetItem(pCfg, "audit")->bval;
  tsEnableAuditCreateTable = cfgGetItem(pCfg, "auditCreateTable")->bval;
  tsAuditInterval = cfgGetItem(pCfg, "auditInterval")->i32;

  tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
  tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
@ -30,12 +30,14 @@ typedef struct SDnodeMgmt {
|
|||
TdThread statusThread;
|
||||
TdThread notifyThread;
|
||||
TdThread monitorThread;
|
||||
TdThread auditThread;
|
||||
TdThread crashReportThread;
|
||||
SSingleWorker mgmtWorker;
|
||||
ProcessCreateNodeFp processCreateNodeFp;
|
||||
ProcessAlterNodeTypeFp processAlterNodeTypeFp;
|
||||
ProcessDropNodeFp processDropNodeFp;
|
||||
SendMonitorReportFp sendMonitorReportFp;
|
||||
SendAuditRecordsFp sendAuditRecordsFp;
|
||||
GetVnodeLoadsFp getVnodeLoadsFp;
|
||||
GetVnodeLoadsFp getVnodeLoadsLiteFp;
|
||||
GetMnodeLoadsFp getMnodeLoadsFp;
|
||||
|
@ -62,7 +64,9 @@ void dmStopStatusThread(SDnodeMgmt *pMgmt);
|
|||
int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt);
|
||||
void dmStopNotifyThread(SDnodeMgmt *pMgmt);
|
||||
int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt);
|
||||
int32_t dmStartAuditThread(SDnodeMgmt *pMgmt);
|
||||
void dmStopMonitorThread(SDnodeMgmt *pMgmt);
|
||||
void dmStopAuditThread(SDnodeMgmt *pMgmt);
|
||||
int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt);
|
||||
void dmStopCrashReportThread(SDnodeMgmt *pMgmt);
|
||||
int32_t dmStartWorker(SDnodeMgmt *pMgmt);
|
||||
|
|
|
@ -29,6 +29,9 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
|
|||
if (dmStartMonitorThread(pMgmt) != 0) {
|
||||
return -1;
|
||||
}
|
||||
if (dmStartAuditThread(pMgmt) != 0) {
|
||||
return -1;
|
||||
}
|
||||
if (dmStartCrashReportThread(pMgmt) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -38,6 +41,7 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
|
|||
static void dmStopMgmt(SDnodeMgmt *pMgmt) {
|
||||
pMgmt->pData->stopped = true;
|
||||
dmStopMonitorThread(pMgmt);
|
||||
dmStopAuditThread(pMgmt);
|
||||
dmStopStatusThread(pMgmt);
|
||||
#if defined(TD_ENTERPRISE)
|
||||
dmStopNotifyThread(pMgmt);
|
||||
|
@ -60,6 +64,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
|
|||
pMgmt->processAlterNodeTypeFp = pInput->processAlterNodeTypeFp;
|
||||
pMgmt->processDropNodeFp = pInput->processDropNodeFp;
|
||||
pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
|
||||
pMgmt->sendAuditRecordsFp = pInput->sendAuditRecordFp;
|
||||
pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;
|
||||
pMgmt->getVnodeLoadsLiteFp = pInput->getVnodeLoadsLiteFp;
|
||||
pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp;
|
||||
|
|
|
@ -99,6 +99,27 @@ static void *dmMonitorThreadFp(void *param) {
  return NULL;
}

static void *dmAuditThreadFp(void *param) {
  SDnodeMgmt *pMgmt = param;
  int64_t lastTime = taosGetTimestampMs();
  setThreadName("dnode-audit");

  while (1) {
    taosMsleep(100);
    if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;

    int64_t curTime = taosGetTimestampMs();
    if (curTime < lastTime) lastTime = curTime;
    float interval = curTime - lastTime;
    if (interval >= tsAuditInterval) {
      (*pMgmt->sendAuditRecordsFp)();
      lastTime = curTime;
    }
  }

  return NULL;
}

static void *dmCrashReportThreadFp(void *param) {
  SDnodeMgmt *pMgmt = param;
  int64_t lastTime = taosGetTimestampMs();
@ -218,6 +239,20 @@ int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt) {
  return 0;
}

int32_t dmStartAuditThread(SDnodeMgmt *pMgmt) {
  TdThreadAttr thAttr;
  taosThreadAttrInit(&thAttr);
  taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
  if (taosThreadCreate(&pMgmt->auditThread, &thAttr, dmAuditThreadFp, pMgmt) != 0) {
    dError("failed to create audit thread since %s", strerror(errno));
    return -1;
  }

  taosThreadAttrDestroy(&thAttr);
  tmsgReportStartup("dnode-audit", "initialized");
  return 0;
}

void dmStopMonitorThread(SDnodeMgmt *pMgmt) {
  if (taosCheckPthreadValid(pMgmt->monitorThread)) {
    taosThreadJoin(pMgmt->monitorThread, NULL);
@ -225,6 +260,13 @@ void dmStopMonitorThread(SDnodeMgmt *pMgmt) {
  }
}

void dmStopAuditThread(SDnodeMgmt *pMgmt) {
  if (taosCheckPthreadValid(pMgmt->auditThread)) {
    taosThreadJoin(pMgmt->auditThread, NULL);
    taosThreadClear(&pMgmt->auditThread);
  }
}

int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt) {
  if (!tsEnableCrashReport) {
    return 0;
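The new audit thread is a simple poll-and-flush loop: sleep 100 ms, then invoke `sendAuditRecordsFp` once at least `tsAuditInterval` milliseconds have elapsed. A minimal standalone sketch of the same pattern, with stand-in helpers (`nowMs` and `flushAuditBatch` are not TDengine functions):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t nowMs(void) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void flushAuditBatch(void) { printf("flush batched audit records\n"); }

// Same shape as dmAuditThreadFp: poll every 100 ms, flush once intervalMs elapsed.
static void auditLoop(volatile bool *stopped, int32_t intervalMs) {
  int64_t lastTime = nowMs();
  while (!*stopped) {
    usleep(100 * 1000);
    int64_t curTime = nowMs();
    if (curTime < lastTime) lastTime = curTime;  // tolerate a clock going backwards
    if (curTime - lastTime >= intervalMs) {
      flushAuditBatch();
      lastTime = curTime;
    }
  }
}
```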
@ -70,6 +70,21 @@ typedef struct SUdfdData {
|
|||
int32_t dnodeId;
|
||||
} SUdfdData;
|
||||
|
||||
#ifndef TD_MODULE_OPTIMIZE
|
||||
typedef struct SDnode {
|
||||
int8_t once;
|
||||
bool stop;
|
||||
EDndRunStatus status;
|
||||
SStartupInfo startup;
|
||||
SDnodeData data;
|
||||
SUdfdData udfdData;
|
||||
TdThreadMutex mutex;
|
||||
TdFilePtr lockfile;
|
||||
STfs *pTfs;
|
||||
SMgmtWrapper wrappers[NODE_END];
|
||||
SDnodeTrans trans;
|
||||
} SDnode;
|
||||
#else
|
||||
typedef struct SDnode {
|
||||
int8_t once;
|
||||
bool stop;
|
||||
|
@ -83,6 +98,7 @@ typedef struct SDnode {
|
|||
STfs *pTfs;
|
||||
SMgmtWrapper wrappers[NODE_END];
|
||||
} SDnode;
|
||||
#endif
|
||||
|
||||
// dmEnv.c
|
||||
SDnode *dmInstance();
|
||||
|
@ -124,6 +140,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
|
|||
|
||||
// dmMonitor.c
|
||||
void dmSendMonitorReport();
|
||||
void dmSendAuditRecords();
|
||||
void dmGetVnodeLoads(SMonVloadInfo *pInfo);
|
||||
void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo);
|
||||
void dmGetMnodeLoads(SMonMloadInfo *pInfo);
|
||||
|
|
|
@ -189,6 +189,7 @@ void dmCleanup() {
|
|||
if (dmCheckRepeatCleanup(pDnode) != 0) return;
|
||||
dmCleanupDnode(pDnode);
|
||||
monCleanup();
|
||||
auditCleanup();
|
||||
syncCleanUp();
|
||||
walCleanUp();
|
||||
udfcClose();
|
||||
|
@ -396,6 +397,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
|
|||
.processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
|
||||
.processDropNodeFp = dmProcessDropNodeReq,
|
||||
.sendMonitorReportFp = dmSendMonitorReport,
|
||||
.sendAuditRecordFp = auditSendRecordsInBatch,
|
||||
.getVnodeLoadsFp = dmGetVnodeLoads,
|
||||
.getVnodeLoadsLiteFp = dmGetVnodeLoadsLite,
|
||||
.getMnodeLoadsFp = dmGetMnodeLoads,
|
||||
|
|
|
@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "dmNodes.h"
#include "audit.h"

static void dmGetMonitorBasicInfo(SDnode *pDnode, SMonBasicInfo *pInfo) {
  pInfo->protocol = 1;
@ -108,6 +109,11 @@ void dmSendMonitorReport() {
  monSendReport();
}

// TODO: move this into a separate file in the future
void dmSendAuditRecords() {
  auditSendRecordsInBatch();
}

void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
  SDnode *pDnode = dmInstance();
  SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE];
@ -86,6 +86,7 @@ typedef enum {
|
|||
typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
||||
typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
||||
typedef void (*SendMonitorReportFp)();
|
||||
typedef void (*SendAuditRecordsFp)();
|
||||
typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
|
||||
typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
|
||||
typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo);
|
||||
|
@ -120,6 +121,7 @@ typedef struct {
|
|||
ProcessAlterNodeTypeFp processAlterNodeTypeFp;
|
||||
ProcessDropNodeFp processDropNodeFp;
|
||||
SendMonitorReportFp sendMonitorReportFp;
|
||||
SendAuditRecordsFp sendAuditRecordFp;
|
||||
GetVnodeLoadsFp getVnodeLoadsFp;
|
||||
GetVnodeLoadsFp getVnodeLoadsLiteFp;
|
||||
GetMnodeLoadsFp getMnodeLoadsFp;
|
||||
|
|
|
@ -37,6 +37,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -50,6 +51,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -63,6 +65,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
test.Restart();
|
||||
|
@ -78,6 +81,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -94,6 +98,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -108,6 +113,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -121,6 +127,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
test.Restart();
|
||||
|
@ -136,6 +143,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -149,5 +157,6 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
|
@ -37,6 +37,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -50,6 +51,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -63,6 +65,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
test.Restart();
|
||||
|
@ -78,6 +81,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -94,6 +98,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -108,6 +113,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -121,6 +127,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
test.Restart();
|
||||
|
@ -136,6 +143,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -149,5 +157,6 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
|
@ -704,10 +704,10 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask
  pReq->streamId = pTask->id.streamId;

  STransAction action = {0};
  SEpSet epset = {0};
  if(pTask->info.nodeId == SNODE_HANDLE){
  SEpSet       epset = {0};
  if (pTask->info.nodeId == SNODE_HANDLE) {
    SSnodeObj *pObj = NULL;
    void *pIter = NULL;
    void      *pIter = NULL;
    while (1) {
      pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj);
      if (pIter == NULL) {
@ -717,10 +717,16 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask
      addEpIntoEpSet(&epset, pObj->pDnode->fqdn, pObj->pDnode->port);
      sdbRelease(pMnode->pSdb, pObj);
    }
  }else{
  } else {
    SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
    epset = mndGetVgroupEpset(pMnode, pVgObj);
    mndReleaseVgroup(pMnode, pVgObj);
    if (pVgObj != NULL) {
      epset = mndGetVgroupEpset(pMnode, pVgObj);
      mndReleaseVgroup(pMnode, pVgObj);
    } else {
      mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", pTask->id.taskId, pTask->info.nodeId);
      taosMemoryFree(pReq);
      return 0;
    }
  }

  // The epset for this task's nodeId may have expired by now, so use the newest epset from the mnode.
@ -1657,6 +1663,7 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat

  STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id));
  if (pe == NULL) {
    mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId);
    return;
  }
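The fix above guards against a vgroup that no longer exists before building the drop action. A generic, standalone sketch of that acquire-check-release shape, with stand-in types and functions (nothing here is a TDengine API):

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int vgId; } DemoVgroup;

static DemoVgroup *demoAcquire(int vgId) {
  if (vgId <= 0) return NULL;  // the vgroup was dropped or never existed
  DemoVgroup *p = malloc(sizeof(DemoVgroup));
  if (p != NULL) p->vgId = vgId;
  return p;
}

static void demoRelease(DemoVgroup *p) { free(p); }

static int demoBuildAction(int nodeId) {
  DemoVgroup *pVg = demoAcquire(nodeId);
  if (pVg == NULL) {
    // orphaned task: log and skip instead of dereferencing a NULL vgroup
    printf("node %d no longer exists, skip redo action\n", nodeId);
    return 0;
  }
  // ... build the epset from pVg ...
  demoRelease(pVg);
  return 0;
}
```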
@ -48,8 +48,7 @@ int32_t clearFinishedTrans(SMnode* pMnode) {
    void* pKey = taosHashGetKey(pEntry, &keyLen);
    // the key is the src/dst db name
    SKeyInfo info = {.pKey = pKey, .keyLen = keyLen};

    mDebug("transId:%d %s startTs:%" PRId64 "cleared due to finished", pEntry->transId, pEntry->name,
    mDebug("transId:%d %s startTs:%" PRId64 " cleared since finished", pEntry->transId, pEntry->name,
           pEntry->startTime);
    taosArrayPush(pList, &info);
  } else {
@ -33,6 +33,7 @@ TEST_F(MndTestAcct, 01_Create_Acct) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_ACCT, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
TEST_F(MndTestAcct, 02_Alter_Acct) {
|
||||
|
@ -43,6 +44,7 @@ TEST_F(MndTestAcct, 02_Alter_Acct) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_ACCT, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
TEST_F(MndTestAcct, 03_Drop_Acct) {
|
||||
|
@ -53,4 +55,5 @@ TEST_F(MndTestAcct, 03_Drop_Acct) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_ACCT, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
|
|
@ -253,12 +253,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
|
|||
pReq = BuildCreateDbReq(dbname, &contLen);
|
||||
pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
pReq = BuildCreateBSmaStbReq(stbname, &contLen);
|
||||
pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 1);
|
||||
}
|
||||
|
@ -269,12 +271,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
|
|||
pReq = BuildCreateBSmaStbReq(stbname, &contLen);
|
||||
pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
pReq = BuildDropStbReq(stbname, &contLen);
|
||||
pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 0);
|
||||
}
|
||||
|
@ -283,5 +287,6 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
|
|||
pReq = BuildDropStbReq(stbname, &contLen);
|
||||
pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -155,6 +155,7 @@ void* MndTestStb::BuildAlterStbAddTagReq(const char* stbname, const char* tagnam
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -176,6 +177,7 @@ void* MndTestStb::BuildAlterStbDropTagReq(const char* stbname, const char* tagna
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -204,6 +206,7 @@ void* MndTestStb::BuildAlterStbUpdateTagNameReq(const char* stbname, const char*
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -226,6 +229,7 @@ void* MndTestStb::BuildAlterStbUpdateTagBytesReq(const char* stbname, const char
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -247,6 +251,7 @@ void* MndTestStb::BuildAlterStbAddColumnReq(const char* stbname, const char* col
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -268,6 +273,7 @@ void* MndTestStb::BuildAlterStbDropColumnReq(const char* stbname, const char* co
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -290,6 +296,7 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c
|
|||
tSerializeSMAlterStbReq(pHead, contLen, &req);
|
||||
|
||||
*pContLen = contLen;
|
||||
taosArrayDestroy(req.pFields);
|
||||
return pHead;
|
||||
}
|
||||
|
||||
|
@ -303,6 +310,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -311,6 +319,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -334,6 +343,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
|
|||
|
||||
STableMetaRsp metaRsp = {0};
|
||||
tDeserializeSTableMetaRsp(pMsg->pCont, pMsg->contLen, &metaRsp);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
|
||||
EXPECT_STREQ(metaRsp.dbFName, dbname);
|
||||
EXPECT_STREQ(metaRsp.tbName, "stb");
|
||||
|
@ -410,6 +420,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_STB, pHead, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -423,6 +434,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -436,6 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -443,30 +456,35 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddTagReq("1.d2.stb3", "tag4", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddTagReq(stbname, "tag3", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddTagReq(stbname, "col1", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -474,6 +492,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
}
|
||||
|
@ -483,6 +502,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -495,18 +515,21 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
|
|||
void* pReq = BuildCreateDbReq(dbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildCreateStbReq(stbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbDropTagReq(stbname, "tag5", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -514,6 +537,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 1);
|
||||
|
@ -524,6 +548,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -536,41 +561,48 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
|
|||
void* pReq = BuildCreateDbReq(dbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildCreateStbReq(stbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag5", "tag6", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "col1", "tag6", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "col1", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -578,6 +610,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 1);
|
||||
|
@ -588,6 +621,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -600,36 +634,42 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
|
|||
void* pReq = BuildCreateDbReq(dbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildCreateStbReq(stbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag5", 12, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag1", 13, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 8, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 20, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 1);
|
||||
|
@ -640,6 +680,7 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -653,6 +694,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -660,30 +702,35 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddColumnReq("1.d6.stb3", "tag4", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddColumnReq(stbname, "tag3", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbAddColumnReq(stbname, "col1", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -691,6 +738,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
|
||||
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
|
||||
EXPECT_EQ(test.GetShowRows(), 1);
|
||||
|
@ -701,6 +749,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
|
|||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
|
||||
ASSERT_NE(pRsp, nullptr);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -713,30 +762,35 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
|
|||
void* pReq = BuildCreateDbReq(dbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildCreateStbReq(stbname, &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, 0);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbDropColumnReq(stbname, "col4", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbDropColumnReq(stbname, "col1", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
{
|
||||
void* pReq = BuildAlterStbDropColumnReq(stbname, "ts", &contLen);
|
||||
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
|
||||
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
rpcFreeCont(pRsp->pCont);
}

{

@@ -744,6 +798,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}

{

@@ -751,6 +806,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);

test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);

@@ -761,6 +817,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}

@@ -773,42 +830,49 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col5", 12, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "ts", 8, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 8, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", TSDB_MAX_BYTES_PER_ROW, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
rpcFreeCont(pRsp->pCont);
}

{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 20, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);

test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);

@@ -818,6 +882,7 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col_not_exist", 20, &contLen, 1);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
rpcFreeCont(pRsp->pCont);

test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);

@@ -828,5 +893,6 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -699,7 +699,23 @@ end:
|
|||
return ret;
|
||||
}
|
||||
|
||||
void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
|
||||
static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
|
||||
|
||||
static STaskId replaceStreamTaskId(SStreamTask* pTask) {
|
||||
ASSERT(pTask->info.fillHistory);
|
||||
STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
|
||||
|
||||
pTask->id.streamId = pTask->streamTaskId.streamId;
|
||||
pTask->id.taskId = pTask->streamTaskId.taskId;
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
static void restoreStreamTaskId(SStreamTask* pTask, STaskId* pId) {
|
||||
ASSERT(pTask->info.fillHistory);
|
||||
pTask->id.taskId = pId->taskId;
|
||||
pTask->id.streamId = pId->streamId;
|
||||
}
|
||||
|
||||
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
|
@ -713,15 +729,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
streamTaskOpenAllUpstreamInput(pTask);
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
SStreamTask* pStateTask = pTask;
|
||||
|
||||
STaskId taskId = {.streamId = 0, .taskId = 0};
|
||||
STaskId taskId = {0};
|
||||
if (pTask->info.fillHistory) {
|
||||
taskId.streamId = pTask->id.streamId;
|
||||
taskId.taskId = pTask->id.taskId;
|
||||
|
||||
pTask->id.streamId = pTask->streamTaskId.streamId;
|
||||
pTask->id.taskId = pTask->streamTaskId.taskId;
|
||||
taskId = replaceStreamTaskId(pTask);
|
||||
}
|
||||
|
||||
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
|
||||
|
@ -731,9 +741,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
} else {
|
||||
tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState);
|
||||
}
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
pTask->id.streamId = taskId.streamId;
|
||||
pTask->id.taskId = taskId.taskId;
|
||||
restoreStreamTaskId(pTask, &taskId);
|
||||
}
|
||||
|
||||
SReadHandle handle = {
|
||||
|
@ -754,15 +764,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
|
||||
qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
|
||||
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
|
||||
SStreamTask* pSateTask = pTask;
|
||||
// SStreamTask task = {0};
|
||||
|
||||
STaskId taskId = {.streamId = 0, .taskId = 0};
|
||||
STaskId taskId = {0};
|
||||
if (pTask->info.fillHistory) {
|
||||
taskId.streamId = pTask->id.streamId;
|
||||
taskId.taskId = pTask->id.taskId;
|
||||
pTask->id.streamId = pTask->streamTaskId.streamId;
|
||||
pTask->id.taskId = pTask->streamTaskId.taskId;
|
||||
taskId = replaceStreamTaskId(pTask);
|
||||
}
|
||||
|
||||
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
|
||||
|
@ -774,15 +778,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
}
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
pTask->id.streamId = taskId.streamId;
|
||||
pTask->id.taskId = taskId.taskId;
|
||||
restoreStreamTaskId(pTask, &taskId);
|
||||
}
|
||||
|
||||
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList);
|
||||
SReadHandle handle = {
|
||||
.checkpointId = pTask->chkInfo.checkpointId,
|
||||
.vnode = NULL,
|
||||
.numOfVgroups = numOfVgroups,
|
||||
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList),
|
||||
.pStateBackend = pTask->pState,
|
||||
.fillHistory = pTask->info.fillHistory,
|
||||
.winRange = pTask->dataRange.window,
|
||||
|
@ -828,12 +830,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
|
||||
}
|
||||
|
||||
// // reset the task status from unfinished transaction
|
||||
// if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
|
||||
// tqWarn("s-task:%s reset task status to be normal, status kept in taskMeta: Paused", pTask->id.idStr);
|
||||
// pTask->status.taskStatus = TASK_STATUS__READY;
|
||||
// }
|
||||
|
||||
streamTaskResetUpstreamStageInfo(pTask);
|
||||
streamSetupScheduleTrigger(pTask);
|
||||
SCheckpointInfo* pChkInfo = &pTask->chkInfo;
|
||||
|
|
|
@@ -181,5 +181,5 @@ int32_t streamStateRebuildFromSnap(SStreamStateWriter* pWriter, int64_t chkpId)
}

int32_t streamStateLoadTasks(SStreamStateWriter* pWriter) {
return streamMetaReloadAllTasks(pWriter->pTq->pStreamMeta);
return streamMetaLoadAllTasks(pWriter->pTq->pStreamMeta);
}
@@ -712,9 +712,9 @@ int32_t resetStreamTaskStatus(SStreamMeta* pMeta) {
}

static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
int32_t vgId = pMeta->vgId;
int32_t code = 0;
int64_t st = taosGetTimestampMs();
int32_t vgId = pMeta->vgId;
int32_t code = 0;
int64_t st = taosGetTimestampMs();

while(1) {
int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);

@@ -736,17 +736,9 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
}

streamMetaWLock(pMeta);
code = streamMetaReopen(pMeta);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d failed to reopen stream meta", vgId);
streamMetaWUnLock(pMeta);
code = terrno;
return code;
}
streamMetaClear(pMeta);

streamMetaInitBackend(pMeta);
int64_t el = taosGetTimestampMs() - st;

tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el/1000.);

code = streamMetaLoadAllTasks(pMeta);

@@ -758,10 +750,10 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
}

if (isLeader && !tsDisableStream) {
tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
resetStreamTaskStatus(pMeta);

streamMetaWUnLock(pMeta);
tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);

startStreamTasks(pMeta);
} else {
streamMetaResetStartInfo(&pMeta->startInfo);
@@ -53,11 +53,10 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
}

static int32_t tsdbOpenBCache(STsdb *pTsdb) {
int32_t code = 0;
// SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;

SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * tsS3BlockSize * szPage, 0, .5);
int32_t code = 0;
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
int64_t szBlock = tsS3BlockSize <= 1024 ? 1024 : tsS3BlockSize;
SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * szBlock * szPage, 0, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;

@@ -67,8 +66,9 @@ static int32_t tsdbOpenBCache(STsdb *pTsdb) {

taosThreadMutexInit(&pTsdb->bMutex, NULL);

_err:
pTsdb->bCache = pCache;

_err:
return code;
}
@ -452,9 +452,11 @@ static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t
|
|||
static void reallocVarData(SColVal *pColVal) {
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type)) {
|
||||
uint8_t *pVal = pColVal->value.pData;
|
||||
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
|
||||
if (pColVal->value.nData) {
|
||||
if (pColVal->value.nData > 0) {
|
||||
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
|
||||
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
|
||||
} else {
|
||||
pColVal->value.pData = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2755,14 +2757,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
|
|||
|
||||
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
|
||||
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
|
||||
if (pCol->colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
if (pColVal->value.nData > 0) {
|
||||
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
|
||||
if (pCol->colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
|
||||
} else {
|
||||
pCol->colVal.value.pData = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2802,17 +2806,21 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
|
|||
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
|
||||
if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) {
|
||||
SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal};
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type) /* && pColVal->value.nData > 0 */) {
|
||||
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol);
|
||||
taosMemoryFree(pLastCol->colVal.value.pData);
|
||||
|
||||
lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
|
||||
if (lastCol.colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
if (pColVal->value.nData > 0) {
|
||||
lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
|
||||
if (lastCol.colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
|
||||
} else {
|
||||
lastCol.colVal.value.pData = NULL;
|
||||
}
|
||||
memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
|
||||
}
|
||||
|
||||
taosArraySet(pColArray, iCol, &lastCol);
|
||||
|
@ -2923,14 +2931,18 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
|
|||
|
||||
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
|
||||
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
|
||||
if (pCol->colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
if (pColVal->value.nData > 0) {
|
||||
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
|
||||
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
|
||||
if (pCol->colVal.value.pData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
if (pColVal->value.nData > 0) {
|
||||
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
|
||||
}
|
||||
} else {
|
||||
pCol->colVal.value.pData = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -115,7 +115,7 @@ static int32_t tsdbDataFileRAWWriterDoClose(SDataFileRAWWriter *writer) { return
static int32_t tsdbDataFileRAWWriterCloseCommit(SDataFileRAWWriter *writer, TFileOpArray *opArr) {
int32_t code = 0;
int32_t lino = 0;
ASSERT(writer->ctx->offset == writer->file.size);
ASSERT(writer->ctx->offset <= writer->file.size);
ASSERT(writer->config->fid == writer->file.fid);

STFileOp op = (STFileOp){
@@ -529,7 +529,8 @@ static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
for (const STfsFile *file = NULL; (file = tfsReaddir(dir)) != NULL;) {
if (taosIsDir(file->aname)) continue;

if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL) {
if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL &&
strncmp(file->aname + strlen(file->aname) - 3, ".cp", 3)) {
int32_t nlevel = tfsGetLevel(fs->tsdb->pVnode->pTfs);
remove_file(file->aname, nlevel > 1 && file->did.level == nlevel - 1);
}

@@ -1118,7 +1119,7 @@ int32_t tsdbFSCreateCopyRangedSnapshot(STFileSystem *fs, TFileSetRangeArray *pRa
TARRAY2_FOREACH(fs->fSetArr, fset) {
int64_t ever = VERSION_MAX;
if (pHash) {
int32_t fid = fset->fid;
int32_t fid = fset->fid;
STFileSetRange *u = taosHashGet(pHash, &fid, sizeof(fid));
if (u) {
ever = u->sver - 1;

@@ -1150,10 +1151,10 @@ int32_t tsdbFSDestroyCopyRangedSnapshot(TFileSetArray **fsetArr) { return tsdbFS

int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ever, TFileSetRangeArray *pRanges,
TFileSetRangeArray **fsrArr) {
int32_t code = 0;
STFileSet *fset;
int32_t code = 0;
STFileSet *fset;
STFileSetRange *fsr1 = NULL;
SHashObj *pHash = NULL;
SHashObj *pHash = NULL;

fsrArr[0] = taosMemoryCalloc(1, sizeof(*fsrArr[0]));
if (fsrArr[0] == NULL) {

@@ -1176,7 +1177,7 @@ int32_t tsdbFSCreateRefRangedSnapshot(STFileSystem *fs, int64_t sver, int64_t ev
int64_t ever1 = ever;

if (pHash) {
int32_t fid = fset->fid;
int32_t fid = fset->fid;
STFileSetRange *u = taosHashGet(pHash, &fid, sizeof(fid));
if (u) {
sver1 = u->sver;
@ -67,7 +67,7 @@ static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond
|
|||
static int32_t doBuildDataBlock(STsdbReader* pReader);
|
||||
static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
|
||||
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
|
||||
static bool hasDataInSttBlock(SSttBlockReader* pSttBlockReader);
|
||||
static bool hasDataInSttBlock(STableBlockScanInfo *pInfo);
|
||||
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
|
||||
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
|
||||
static void resetTableListIndex(SReaderStatus* pStatus);
|
||||
|
@ -1466,7 +1466,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
int64_t tsLast = INT64_MIN;
|
||||
if (hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (hasDataInSttBlock(pBlockScanInfo)) {
|
||||
tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
|
||||
}
|
||||
|
||||
|
@ -1485,7 +1485,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
int64_t minKey = 0;
|
||||
if (pReader->info.order == TSDB_ORDER_ASC) {
|
||||
minKey = INT64_MAX; // chosen the minimum value
|
||||
if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
|
||||
|
@ -1498,7 +1498,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
} else {
|
||||
minKey = INT64_MIN;
|
||||
if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
|
||||
|
@ -1705,7 +1705,7 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader*
|
|||
}
|
||||
|
||||
bool dataInDataFile = hasDataInFileBlock(pBlockData, pDumpInfo);
|
||||
bool dataInSttFile = hasDataInSttBlock(pSttBlockReader);
|
||||
bool dataInSttFile = hasDataInSttBlock(pBlockScanInfo);
|
||||
if (dataInDataFile && (!dataInSttFile)) {
|
||||
// no stt file block available, only data block exists
|
||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||
|
@ -1791,7 +1791,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
|
||||
|
||||
int64_t tsLast = INT64_MIN;
|
||||
if (hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (hasDataInSttBlock(pBlockScanInfo)) {
|
||||
tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
|
||||
}
|
||||
|
||||
|
@ -1840,7 +1840,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
minKey = key;
|
||||
}
|
||||
|
||||
if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
} else {
|
||||
|
@ -1857,7 +1857,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
minKey = key;
|
||||
}
|
||||
|
||||
if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
|
||||
minKey = tsLast;
|
||||
}
|
||||
}
|
||||
|
@ -2065,7 +2065,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
|
|||
|
||||
// the stt block reader has been initialized for this table.
|
||||
if (pSttBlockReader->uid == pScanInfo->uid) {
|
||||
return hasDataInSttBlock(pSttBlockReader);
|
||||
return hasDataInSttBlock(pScanInfo);
|
||||
}
|
||||
|
||||
if (pSttBlockReader->uid != 0) {
|
||||
|
@ -2158,7 +2158,9 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
|
|||
return hasData;
|
||||
}
|
||||
|
||||
static bool hasDataInSttBlock(SSttBlockReader* pSttBlockReader) { return pSttBlockReader->mergeTree.pIter != NULL; }
|
||||
static bool hasDataInSttBlock(STableBlockScanInfo *pInfo) {
|
||||
return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA;
|
||||
}
|
||||
|
||||
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
|
||||
if ((pBlockData->nRow > 0) && (pBlockData->nRow != pDumpInfo->totalRows)) {
|
||||
|
@ -2733,7 +2735,7 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
|
|||
int64_t st = taosGetTimestampUs();
|
||||
while (1) {
|
||||
// no data in stt block and block, no need to proceed.
|
||||
if (!hasDataInSttBlock(pSttBlockReader)) {
|
||||
if (!hasDataInSttBlock(pScanInfo)) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2850,7 +2852,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
|
||||
|
||||
// no data in stt block, no need to proceed.
|
||||
while (hasDataInSttBlock(pSttBlockReader)) {
|
||||
while (hasDataInSttBlock(pScanInfo)) {
|
||||
ASSERT(pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA);
|
||||
|
||||
code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pSttBlockReader);
|
||||
|
|
|
@@ -252,6 +252,7 @@ static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
taosArrayClear(pScanInfo->pFileDelData); // del data from each file set
pScanInfo->cleanSttBlocks = false;
pScanInfo->numOfRowsInStt = 0;
pScanInfo->sttBlockReturned = false;
INIT_TIMEWINDOW(&pScanInfo->sttWindow);
INIT_TIMEWINDOW(&pScanInfo->filesetWindow);
pScanInfo->sttKeyInfo.status = STT_FILE_READER_UNINIT;
@@ -112,7 +112,7 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile
TSDB_CHECK_CODE(code, lino, _exit);

char *object_name = taosDirEntryBaseName(fname);
code = s3PutObjectFromFile2(from->fname, object_name);
code = s3PutObjectFromFile2(from->fname, object_name, 1);
TSDB_CHECK_CODE(code, lino, _exit);

taosCloseFile(&fdFrom);
@@ -17,9 +17,12 @@
#define _TD_AUDIT_INT_H_

#include "audit.h"
#include "tarray.h"

typedef struct {
SAuditCfg cfg;
SArray *records;
TdThreadMutex lock;
} SAudit;

#endif /*_TD_AUDIT_INT_H_*/
@ -14,6 +14,8 @@
|
|||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
|
||||
#include "tarray.h"
|
||||
#include "auditInt.h"
|
||||
#include "taoserror.h"
|
||||
#include "thttp.h"
|
||||
|
@ -21,25 +23,56 @@
|
|||
#include "tjson.h"
|
||||
#include "tglobal.h"
|
||||
#include "mnode.h"
|
||||
#include "audit.h"
|
||||
|
||||
SAudit tsAudit = {0};
|
||||
char* tsAuditUri = "/audit";
|
||||
char* tsAuditBatchUri = "/audit-batch";
|
||||
|
||||
int32_t auditInit(const SAuditCfg *pCfg) {
|
||||
tsAudit.cfg = *pCfg;
|
||||
tsAudit.records = taosArrayInit(0, sizeof(SAuditRecord *));
|
||||
taosThreadMutexInit(&tsAudit.lock, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void auditCleanup() {
|
||||
tsLogFp = NULL;
|
||||
taosArrayDestroy(tsAudit.records);
|
||||
tsAudit.records = NULL;
|
||||
taosThreadMutexDestroy(&tsAudit.lock);
|
||||
}
|
||||
|
||||
extern void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len);
|
||||
extern void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len);
|
||||
extern void auditSendRecordsInBatchImp();
|
||||
|
||||
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len) {
|
||||
auditRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
|
||||
}
|
||||
|
||||
void auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len) {
|
||||
auditAddRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
|
||||
}
|
||||
|
||||
void auditSendRecordsInBatch(){
|
||||
auditSendRecordsInBatchImp();
|
||||
}
|
||||
|
||||
#ifndef TD_ENTERPRISE
|
||||
void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len) {
|
||||
}
|
||||
|
||||
void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
|
||||
char *detail, int32_t len) {
|
||||
}
|
||||
|
||||
void auditSendRecordsInBatchImp(){
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -749,7 +749,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);

SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);

int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
@@ -984,6 +984,21 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {

qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);

pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
pSup->deleteMark = INT64_MAX;
pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
pInfo->ignoreExpiredData = false;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT) {
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
STimeWindowAggSupp* pSup = &pInfo->twAggSup;

ASSERT(pSup->calTrigger == STREAM_TRIGGER_AT_ONCE || pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
ASSERT(pSup->calTriggerSaved == 0 && pSup->deleteMarkSaved == 0);

qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);

pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
@@ -137,7 +137,7 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
 * +----------+---------------+
 */
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
if (!keepGroup) {
@@ -65,7 +65,7 @@ typedef struct SPartitionOperatorInfo {
static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
static SArray* extractColumnInfo(SNodeList* pNodeList);

static void freeGroupKey(void* param) {

@@ -1016,7 +1016,7 @@ _error:
}

int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
@@ -176,7 +176,7 @@ int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pW

for (int32_t i = start; i < rows; ++i) {
if (pTsData[i] >= maxTs) {
return i - 1 - start;
return i - start;
}

if (pWin->skey > pTsData[i]) {

@@ -616,6 +616,10 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", pSeKeyBuf[i].win.skey,
pSeKeyBuf[i].groupId, i);
getSessionWindowInfoByKey(pAggSup, pSeKeyBuf + i, &curInfo.winInfo);
//event window has been deleted
if (!IS_VALID_SESSION_WIN(curInfo.winInfo)) {
continue;
}
setEventWindowFlag(pAggSup, &curInfo);
if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) {
continue;
@@ -634,9 +634,6 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, i
for (int32_t i = 0; i < size; i++) {
SWinKey* winKey = taosArrayGet(wins, i);
STimeWindow nextWin = getFinalTimeWindow(winKey->ts, &pInfo->interval);
if (isOverdue(nextWin.ekey, &pInfo->twAggSup) && pInfo->ignoreExpiredData) {
continue;
}
void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
if (!chIds) {
SPullWindowInfo pull = {

@@ -801,7 +798,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
}
while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData &&
if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData && pSDataBlock->info.type != STREAM_PULL_DATA &&
checkExpiredData(&pInfo->stateStore, pInfo->pUpdateInfo, &pInfo->twAggSup, pSDataBlock->info.id.uid,
nextWin.ekey)) ||
!inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
@@ -5466,13 +5466,12 @@ bool blockDistSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
}

int32_t blockDistFunction(SqlFunctionCtx* pCtx) {
const int32_t BLOCK_DIST_RESULT_ROWS = 24;
const int32_t BLOCK_DIST_RESULT_ROWS = 25;

SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];

SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
STableBlockDistInfo* pDistInfo = GET_ROWCELL_INTERBUF(pResInfo);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
STableBlockDistInfo* pDistInfo = GET_ROWCELL_INTERBUF(pResInfo);

STableBlockDistInfo p1 = {0};
tDeserializeBlockDistInfo(varDataVal(pInputCol->pData), varDataLen(pInputCol->pData), &p1);
@@ -164,7 +164,7 @@ IF(NOT TD_DARWIN)
)

add_test(
NAME idxtest
NAME idxTest
COMMAND idxTest
)
add_test(
@ -60,6 +60,7 @@ class JsonEnv : public ::testing::Test {
|
|||
}
|
||||
virtual void TearDown() {
|
||||
indexJsonClose(index);
|
||||
indexOptsDestroy(opts);
|
||||
printf("destory\n");
|
||||
taosMsleep(1000);
|
||||
}
|
||||
|
@ -152,6 +153,7 @@ TEST_F(JsonEnv, testWrite) {
|
|||
indexMultiTermQueryAdd(mq, q, QUERY_TERM);
|
||||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(100, taosArrayGetSize(result));
|
||||
taosArrayDestroy(result);
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
}
|
||||
}
|
||||
|
@ -211,6 +213,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(10, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
{
|
||||
|
@ -226,6 +229,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
{
|
||||
|
@ -241,6 +245,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(10, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -311,6 +316,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test");
|
||||
|
@ -325,6 +331,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test");
|
||||
|
@ -340,6 +347,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test");
|
||||
|
@ -355,6 +363,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test");
|
||||
|
@ -370,6 +379,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -413,6 +423,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -427,6 +438,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -442,6 +454,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -456,6 +469,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -470,6 +484,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("other_column");
|
||||
|
@ -499,6 +514,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(0, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -527,6 +543,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
|
|||
indexJsonSearch(index, mq, result);
|
||||
EXPECT_EQ(2000, taosArrayGetSize(result));
|
||||
indexMultiTermQueryDestroy(mq);
|
||||
taosArrayDestroy(result);
|
||||
}
|
||||
}
|
||||
TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
||||
|
@ -553,6 +570,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
|||
int val = 9;
|
||||
Search(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
|
@ -560,6 +578,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
|
|||
std::string colVal("xxxxxxxxxxxxxxx");
|
||||
Search(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), QUERY_TERM, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
}
|
||||
TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
|
||||
|
@ -583,6 +602,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
|
|||
float val = 1.9;
|
||||
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(2000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
|
@ -590,6 +610,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
|
|||
float val = 2.1;
|
||||
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
std::string colName("test1");
|
||||
|
@ -597,6 +618,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
|
|||
float val = 2.1;
|
||||
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
}
|
||||
TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
|
||||
|
@ -618,29 +640,34 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
|
|||
double val = 1.9;
|
||||
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(2000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
double val = 2.1;
|
||||
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
double val = 2.1;
|
||||
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
double val = 10.0;
|
||||
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_EQUAL, &res);
|
||||
EXPECT_EQ(2000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
{
|
||||
SArray* res = NULL;
|
||||
double val = 10.0;
|
||||
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_THAN, &res);
|
||||
EXPECT_EQ(1000, taosArrayGetSize(res));
|
||||
taosArrayDestroy(res);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -53,6 +53,7 @@ TEST(NodesTest, traverseTest) {
EXPECT_EQ(res, DEAL_RES_CONTINUE);
EXPECT_EQ(nodeType(pRoot), QUERY_NODE_VALUE);
EXPECT_EQ(string(((SValueNode*)pRoot)->literal), "18");
nodesDestroyNode(pRoot);
}

int main(int argc, char* argv[]) {
@@ -1367,8 +1367,8 @@ EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
} else {
int32_t type = output.columnData->info.type;
if (IS_VAR_DATA_TYPE(type)) { // todo refactor
res->datum.p = output.columnData->pData;
output.columnData->pData = NULL;
res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, sizeof(char)); // add \0 to the end for print json value
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {
nodesSetValueNodeValue(res, output.columnData->pData);
}
@@ -455,7 +455,6 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
tstrerror(rspCode));

SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId));

code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode);
pMsg->pData = NULL;
@@ -515,7 +515,7 @@ static int uploadCheckpointToS3(char* id, char* path) {
char object[PATH_MAX] = {0};
snprintf(object, sizeof(object), "%s%s%s", id, TD_DIRSEP, name);

if (s3PutObjectFromFile2(filename, object) != 0) {
if (s3PutObjectFromFile2(filename, object, 0) != 0) {
taosCloseDir(&pDir);
return -1;
}
@ -31,7 +31,6 @@ int32_t streamMetaId = 0;
|
|||
int32_t taskDbWrapperId = 0;
|
||||
|
||||
static void metaHbToMnode(void* param, void* tmrId);
|
||||
static void streamMetaClear(SStreamMeta* pMeta);
|
||||
static int32_t streamMetaBegin(SStreamMeta* pMeta);
|
||||
static void streamMetaCloseImpl(void* arg);
|
||||
|
||||
|
@ -395,41 +394,6 @@ _err:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
int32_t streamMetaReopen(SStreamMeta* pMeta) {
|
||||
streamMetaClear(pMeta);
|
||||
|
||||
// NOTE: role should not be changed during reopen meta
|
||||
pMeta->streamBackendRid = -1;
|
||||
pMeta->streamBackend = NULL;
|
||||
|
||||
char* defaultPath = taosMemoryCalloc(1, strlen(pMeta->path) + 128);
|
||||
sprintf(defaultPath, "%s%s%s", pMeta->path, TD_DIRSEP, "state");
|
||||
taosRemoveDir(defaultPath);
|
||||
|
||||
char* newPath = taosMemoryCalloc(1, strlen(pMeta->path) + 128);
|
||||
sprintf(newPath, "%s%s%s", pMeta->path, TD_DIRSEP, "received");
|
||||
|
||||
int32_t code = taosStatFile(newPath, NULL, NULL, NULL);
|
||||
if (code == 0) {
|
||||
// directory exists
|
||||
code = taosRenameFile(newPath, defaultPath);
|
||||
if (code != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
stError("vgId:%d failed to rename file, from %s to %s, code:%s", pMeta->vgId, newPath, defaultPath,
|
||||
tstrerror(terrno));
|
||||
|
||||
taosMemoryFree(defaultPath);
|
||||
taosMemoryFree(newPath);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFree(defaultPath);
|
||||
taosMemoryFree(newPath);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// todo refactor: the lock shoud be restricted in one function
|
||||
void streamMetaInitBackend(SStreamMeta* pMeta) {
|
||||
pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId, pMeta->vgId);
|
||||
|
@ -829,28 +793,27 @@ static void doClear(void* pKey, void* pVal, TBC* pCur, SArray* pRecycleList) {
|
|||
taosArrayDestroy(pRecycleList);
|
||||
}
|
||||
|
||||
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta) {
|
||||
if (pMeta == NULL) return 0;
|
||||
|
||||
return streamMetaLoadAllTasks(pMeta);
|
||||
}
|
||||
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
|
||||
TBC* pCur = NULL;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
|
||||
stInfo("vgId:%d load stream tasks from meta files", vgId);
|
||||
|
||||
if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) {
|
||||
stError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
void* pKey = NULL;
|
||||
int32_t kLen = 0;
|
||||
void* pVal = NULL;
|
||||
int32_t vLen = 0;
|
||||
SDecoder decoder;
|
||||
SArray* pRecycleList = taosArrayInit(4, sizeof(STaskId));
|
||||
|
||||
if (pMeta == NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SArray* pRecycleList = taosArrayInit(4, sizeof(STaskId));
|
||||
int32_t vgId = pMeta->vgId;
|
||||
stInfo("vgId:%d load stream tasks from meta files", vgId);
|
||||
|
||||
if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) {
|
||||
stError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
|
||||
taosArrayDestroy(pRecycleList);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tdbTbcMoveToFirst(pCur);
|
||||
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
|
||||
|
|
|
@@ -46,6 +46,7 @@ SyncIndex raftLogBeginIndex(struct SSyncLogStore* pLogStore);
SyncIndex raftLogEndIndex(struct SSyncLogStore* pLogStore);
int32_t raftLogEntryCount(struct SSyncLogStore* pLogStore);
SyncIndex raftLogLastIndex(struct SSyncLogStore* pLogStore);
SyncIndex raftLogIndexRetention(struct SSyncLogStore* pLogStore, int64_t bytes);
SyncTerm raftLogLastTerm(struct SSyncLogStore* pLogStore);
int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry);
@@ -63,7 +63,6 @@ typedef struct SSyncSnapshotSender {
int64_t sendingMS;
SyncTerm term;
int64_t startTime;
int64_t waitTime;
int64_t lastSendTime;
bool finish;
@@ -305,6 +305,10 @@ SyncIndex syncMinMatchIndex(SSyncNode* pSyncNode) {
return minMatchIndex;
}

static SyncIndex syncLogRetentionIndex(SSyncNode* pSyncNode, int64_t bytes) {
return pSyncNode->pLogStore->syncLogIndexRetention(pSyncNode->pLogStore, bytes);
}

int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
SSyncNode* pSyncNode = syncNodeAcquire(rid);
if (pSyncNode == NULL) {

@@ -331,7 +335,6 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
} else {
// vnode
if (pSyncNode->replicaNum > 1) {
// multi replicas
logRetention = SYNC_VNODE_LOG_RETENTION;
}
}

@@ -344,7 +347,9 @@ int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex) {
syncNodeRelease(pSyncNode);
return 0;
}
logRetention = TMAX(logRetention, lastApplyIndex - pSyncNode->minMatchIndex + logRetention);
SyncIndex retentionIndex =
TMAX(pSyncNode->minMatchIndex, syncLogRetentionIndex(pSyncNode, SYNC_WAL_LOG_RETENTION_SIZE));
logRetention += TMAX(0, lastApplyIndex - retentionIndex);
}

_DEL_WAL:
@@ -70,6 +70,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
pLogStore->syncLogIsEmpty = raftLogIsEmpty;
pLogStore->syncLogEntryCount = raftLogEntryCount;
pLogStore->syncLogLastIndex = raftLogLastIndex;
pLogStore->syncLogIndexRetention = raftLogIndexRetention;
pLogStore->syncLogLastTerm = raftLogLastTerm;
pLogStore->syncLogAppendEntry = raftLogAppendEntry;
pLogStore->syncLogGetEntry = raftLogGetEntry;

@@ -154,6 +155,15 @@ SyncIndex raftLogLastIndex(struct SSyncLogStore* pLogStore) {
return lastVer;
}

SyncIndex raftLogIndexRetention(struct SSyncLogStore* pLogStore, int64_t bytes) {
SyncIndex lastIndex;
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
SyncIndex lastVer = walGetVerRetention(pWal, bytes);

return lastVer;
}

SyncIndex raftLogWriteIndex(struct SSyncLogStore* pLogStore) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
@ -23,8 +23,9 @@
|
|||
#include "syncReplication.h"
|
||||
#include "syncUtil.h"
|
||||
|
||||
static SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths);
|
||||
|
||||
static void syncSnapBufferReset(SSyncSnapBuffer *pBuf) {
|
||||
taosThreadMutexLock(&pBuf->mutex);
|
||||
for (int64_t i = pBuf->start; i < pBuf->end; ++i) {
|
||||
if (pBuf->entryDeleteCb) {
|
||||
pBuf->entryDeleteCb(pBuf->entries[i % pBuf->size]);
|
||||
|
@ -34,7 +35,6 @@ static void syncSnapBufferReset(SSyncSnapBuffer *pBuf) {
|
|||
pBuf->start = SYNC_SNAPSHOT_SEQ_BEGIN + 1;
|
||||
pBuf->end = pBuf->start;
|
||||
pBuf->cursor = pBuf->start - 1;
|
||||
taosThreadMutexUnlock(&pBuf->mutex);
|
||||
}
|
||||
|
||||
static void syncSnapBufferDestroy(SSyncSnapBuffer **ppBuf) {
|
||||
|
@ -81,7 +81,6 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI
|
|||
pSender->replicaIndex = replicaIndex;
|
||||
pSender->term = raftStoreGetTerm(pSyncNode);
|
||||
pSender->startTime = -1;
|
||||
pSender->waitTime = -1;
|
||||
pSender->pSyncNode->pFsm->FpGetSnapshotInfo(pSender->pSyncNode->pFsm, &pSender->snapshot);
|
||||
pSender->finish = false;
|
||||
|
||||
|
@ -108,6 +107,19 @@ void syncSnapBlockDestroy(void *ptr) {
|
|||
taosMemoryFree(pBlk);
|
||||
}
|
||||
|
||||
static int32_t snapshotSenderClearInfoData(SSyncSnapshotSender *pSender) {
|
||||
if (pSender->snapshotParam.data) {
|
||||
taosMemoryFree(pSender->snapshotParam.data);
|
||||
pSender->snapshotParam.data = NULL;
|
||||
}
|
||||
|
||||
if (pSender->snapshot.data) {
|
||||
taosMemoryFree(pSender->snapshot.data);
|
||||
pSender->snapshot.data = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
|
||||
if (pSender == NULL) return;
|
||||
|
||||
|
@ -122,10 +134,8 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
|
|||
syncSnapBufferDestroy(&pSender->pSndBuf);
|
||||
}
|
||||
|
||||
if (pSender->snapshotParam.data) {
|
||||
taosMemoryFree(pSender->snapshotParam.data);
|
||||
pSender->snapshotParam.data = NULL;
|
||||
}
|
||||
snapshotSenderClearInfoData(pSender);
|
||||
|
||||
// free sender
|
||||
taosMemoryFree(pSender);
|
||||
}
|
||||
|
@ -198,20 +208,24 @@ void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
|
|||
// update flag
|
||||
int8_t stopped = !atomic_val_compare_exchange_8(&pSender->start, true, false);
|
||||
if (stopped) return;
|
||||
taosThreadMutexLock(&pSender->pSndBuf->mutex);
|
||||
{
|
||||
pSender->finish = finish;
|
||||
|
||||
pSender->finish = finish;
|
||||
pSender->waitTime = -1;
|
||||
// close reader
|
||||
if (pSender->pReader != NULL) {
|
||||
pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
|
||||
pSender->pReader = NULL;
|
||||
}
|
||||
|
||||
// close reader
|
||||
if (pSender->pReader != NULL) {
|
||||
pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
|
||||
pSender->pReader = NULL;
|
||||
syncSnapBufferReset(pSender->pSndBuf);
|
||||
|
||||
snapshotSenderClearInfoData(pSender);
|
||||
|
||||
SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
|
||||
sSInfo(pSender, "snapshot sender stop, to dnode:%d, finish:%d", DID(&destId), finish);
|
||||
}
|
||||
|
||||
syncSnapBufferReset(pSender->pSndBuf);
|
||||
|
||||
SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
|
||||
sSInfo(pSender, "snapshot sender stop, to dnode:%d, finish:%d", DID(&destId), finish);
|
||||
taosThreadMutexUnlock(&pSender->pSndBuf->mutex);
|
||||
}
|
||||
|
||||
int32_t syncSnapSendMsg(SSyncSnapshotSender *pSender, int32_t seq, void *pBlock, int32_t blockLen, int32_t typ) {
|
||||
|
@ -324,6 +338,9 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
|
|||
SSyncSnapBuffer *pSndBuf = pSender->pSndBuf;
|
||||
int32_t code = -1;
|
||||
taosThreadMutexLock(&pSndBuf->mutex);
|
||||
if (pSender->pReader == NULL || pSender->finish || !snapshotSenderIsStart(pSender)) {
|
||||
goto _out;
|
||||
}
|
||||
|
||||
for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) {
|
||||
SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size];
|
||||
|
@ -338,6 +355,12 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
|
|||
pBlk->sendTimeMs = nowMs;
|
||||
}
|
||||
|
||||
if (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSndBuf->end <= pSndBuf->start) {
|
||||
if (snapshotSend(pSender) != 0) {
|
||||
goto _out;
|
||||
}
|
||||
}
|
||||
|
||||
if (pSender->seq == SYNC_SNAPSHOT_SEQ_END && pSndBuf->end <= pSndBuf->start) {
|
||||
if (syncSnapSendMsg(pSender, pSender->seq, NULL, 0, 0) != 0) {
|
||||
goto _out;
|
||||
|
@ -361,14 +384,7 @@ int32_t syncNodeStartSnapshot(SSyncNode *pSyncNode, SRaftId *pDestId) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int64_t timeNow = taosGetTimestampMs();
|
||||
if (pSender->waitTime <= 0) {
|
||||
pSender->waitTime = timeNow + SNAPSHOT_WAIT_MS;
|
||||
}
|
||||
if (timeNow < pSender->waitTime) {
|
||||
sSDebug(pSender, "snapshot sender waitTime not expired yet, ignore");
|
||||
return 0;
|
||||
}
|
||||
taosMsleep(1);
|
||||
|
||||
int32_t code = snapshotSenderStart(pSender);
|
||||
if (code != 0) {
|
||||
|
@ -416,6 +432,19 @@ SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId from
|
|||
return pReceiver;
|
||||
}
|
||||
|
||||
static int32_t snapshotReceiverClearInfoData(SSyncSnapshotReceiver *pReceiver) {
|
||||
if (pReceiver->snapshotParam.data) {
|
||||
taosMemoryFree(pReceiver->snapshotParam.data);
|
||||
pReceiver->snapshotParam.data = NULL;
|
||||
}
|
||||
|
||||
if (pReceiver->snapshot.data) {
|
||||
taosMemoryFree(pReceiver->snapshot.data);
|
||||
pReceiver->snapshot.data = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
|
||||
if (pReceiver == NULL) return;
|
||||
|
||||
|
@ -429,22 +458,13 @@ void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {
|
|||
pReceiver->pWriter = NULL;
|
||||
}
|
||||
|
||||
// free data of snapshot info
|
||||
if (pReceiver->snapshotParam.data) {
|
||||
taosMemoryFree(pReceiver->snapshotParam.data);
|
||||
pReceiver->snapshotParam.data = NULL;
|
||||
}
|
||||
|
||||
if (pReceiver->snapshot.data) {
|
||||
taosMemoryFree(pReceiver->snapshot.data);
|
||||
pReceiver->snapshot.data = NULL;
|
||||
}
|
||||
|
||||
// free snap buf
|
||||
if (pReceiver->pRcvBuf) {
|
||||
syncSnapBufferDestroy(&pReceiver->pRcvBuf);
|
||||
}
|
||||
|
||||
snapshotReceiverClearInfoData(pReceiver);
|
||||
|
||||
// free receiver
|
||||
taosMemoryFree(pReceiver);
|
||||
}
|
||||
|
@ -504,7 +524,9 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p
|
|||
pReceiver->term = pPreMsg->term;
|
||||
pReceiver->fromId = pPreMsg->srcId;
|
||||
pReceiver->startTime = pPreMsg->startTime;
|
||||
ASSERT(pReceiver->startTime);
|
||||
|
||||
pReceiver->snapshotParam.start = syncNodeGetSnapBeginIndex(pReceiver->pSyncNode);
|
||||
pReceiver->snapshotParam.end = -1;
|
||||
|
||||
sRInfo(pReceiver, "snapshot receiver start, from dnode:%d.", DID(&pReceiver->fromId));
|
||||
}
|
||||
|
@ -514,19 +536,24 @@ void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
|
|||
|
||||
int8_t stopped = !atomic_val_compare_exchange_8(&pReceiver->start, true, false);
|
||||
if (stopped) return;
|
||||
|
||||
if (pReceiver->pWriter != NULL) {
|
||||
int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false,
|
||||
&pReceiver->snapshot);
|
||||
if (ret != 0) {
|
||||
sRError(pReceiver, "snapshot receiver stop write failed since %s", terrstr());
|
||||
taosThreadMutexLock(&pReceiver->pRcvBuf->mutex);
|
||||
{
|
||||
if (pReceiver->pWriter != NULL) {
|
||||
int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
|
||||
false, &pReceiver->snapshot);
|
||||
if (ret != 0) {
|
||||
sRError(pReceiver, "snapshot receiver stop write failed since %s", terrstr());
|
||||
}
|
||||
pReceiver->pWriter = NULL;
|
||||
} else {
|
||||
sRInfo(pReceiver, "snapshot receiver stop, writer is null");
|
||||
}
|
||||
pReceiver->pWriter = NULL;
|
||||
} else {
|
||||
sRInfo(pReceiver, "snapshot receiver stop, writer is null");
|
||||
}
|
||||
|
||||
syncSnapBufferReset(pReceiver->pRcvBuf);
|
||||
syncSnapBufferReset(pReceiver->pRcvBuf);
|
||||
|
||||
snapshotReceiverClearInfoData(pReceiver);
|
||||
}
|
||||
taosThreadMutexUnlock(&pReceiver->pRcvBuf->mutex);
|
||||
}
|
||||
|
||||
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
|
||||
|
@ -656,7 +683,10 @@ static int32_t syncSnapReceiverExchgSnapInfo(SSyncNode *pSyncNode, SSyncSnapshot
|
|||
memcpy(pInfo->data, pMsg->data, pMsg->dataLen);
|
||||
|
||||
// exchange snap info
|
||||
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, pInfo);
|
||||
if (pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, pInfo) != 0) {
|
||||
sRError(pReceiver, "failed to get snapshot info. type: %d", pMsg->payloadType);
|
||||
goto _out;
|
||||
}
|
||||
SSyncTLV *datHead = pInfo->data;
|
||||
if (datHead->typ != TDMT_SYNC_PREP_SNAPSHOT_REPLY) {
|
||||
sRError(pReceiver, "unexpected data typ in data of snapshot info. typ: %d", datHead->typ);
|
||||
|
|
|
@@ -218,6 +218,7 @@ TEST_F(TfsTest, 04_File) {
EXPECT_STREQ(outfile.aname, file0.aname);
EXPECT_STREQ(outfile.rname, "fname");
EXPECT_EQ(outfile.pTfs, pTfs);
taosMemoryFree(ret);
}

{
@@ -106,4 +106,3 @@ add_test(
NAME transUtilUt
COMMAND transportTest
)
@@ -654,6 +654,23 @@ _err:
return -1;
}

int64_t walGetVerRetention(SWal* pWal, int64_t bytes) {
int64_t ver = -1;
int64_t totSize = 0;
taosThreadMutexLock(&pWal->mutex);
int32_t fileIdx = taosArrayGetSize(pWal->fileInfoSet);
while (--fileIdx) {
SWalFileInfo* pInfo = taosArrayGet(pWal->fileInfoSet, fileIdx);
if (totSize >= bytes) {
ver = pInfo->lastVer;
break;
}
totSize += pInfo->fileSize;
}
taosThreadMutexUnlock(&pWal->mutex);
return ver + 1;
}

int walCheckAndRepairIdx(SWal* pWal) {
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
int32_t fileIdx = sz;
@@ -441,4 +441,5 @@ TEST_F(WalRetentionEnv, repairMeta1) {
EXPECT_EQ(newStr[j], pRead->pHead->head.body[j]);
}
}
walCloseReader(pRead);
}
@@ -24,6 +24,14 @@
extern int wcwidth(wchar_t c);
extern int wcswidth(const wchar_t *s, size_t n);

char *tstrdup(const char *str) {
#ifdef WINDOWS
return _strdup(str);
#else
return strdup(str);
#endif
}

#ifdef WINDOWS
char *strsep(char **stringp, const char *delim) {
char *s;
@@ -111,11 +111,6 @@ void fileOperateOnBusy(void *param) {

ret = taosUnLockFile(pFile);
printf("On busy thread unlock file ret:%d\n", ret);
#ifdef _TD_DARWIN_64
ASSERT_EQ(ret, 0);
#else
ASSERT_NE(ret, 0);
#endif

ret = taosCloseFile(&pFile);
printf("On busy thread close file ret:%d\n", ret);

@@ -134,6 +129,8 @@ TEST(osTest, osFile) {
printf("create file success\n");
taosCloseFile(&pOutFD);

taosCloseFile(&pOutFD);

TdFilePtr pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE);
printf("open file\n");
ASSERT_NE(pFile, nullptr);
@@ -430,6 +430,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t
return item == NULL ? -1 : (int32_t)((char*)item - (char*)pArray->pData) / pArray->elemSize;
}

#ifdef BUILD_NO_CALL
static int32_t taosArrayPartition(SArray* pArray, int32_t i, int32_t j, __ext_compar_fn_t fn, const void* userData) {
void* key = taosArrayGetP(pArray, i);
while (i < j) {

@@ -485,6 +486,7 @@ static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void
}
}
}
#endif

int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode) {
int32_t tlen = 0;
@ -1097,7 +1097,8 @@ int32_t patternMatch(const char *pattern, size_t psize, const char *str, size_t
      c1 = str[j++];
      ++nMatchChar;

      if (c == '\\' && pattern[i] == '_' && c1 == '_') {
      if (c == '\\' && pattern[i] == c1 &&
          (c1 == '_' || c1 == '%')) {
        i++;
        continue;
      }
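The `patternMatch` change generalises the escape branch: previously only `\_` against a literal `_` was skipped, now a backslash escapes either `_` or `%` when the escaped character equals the current input character, so `\%` in a LIKE pattern matches a literal `%` (the new utilTest case further down checks exactly `a\%c` against `a%c`). A tiny Python model of just the new condition, assuming `c` is the pattern character already consumed and `c1` is the current input character:

```python
def escapes_literal(c, next_pattern_ch, c1):
    """Model of the new check: backslash + ('_' or '%') in the pattern
    consumes one literal '_' or '%' from the input."""
    return c == "\\" and next_pattern_ch == c1 and c1 in ("_", "%")

print(escapes_literal("\\", "_", "_"))   # True, same as before the change
print(escapes_literal("\\", "%", "%"))   # True, newly handled: literal %
print(escapes_literal("\\", "%", "x"))   # False, falls through to normal matching
```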
@ -33,6 +33,14 @@ ENDIF()

INCLUDE_DIRECTORIES(${TD_SOURCE_DIR}/src/util/inc)

# arrayTest
add_executable(arrayTest "arrayTest.cpp")
target_link_libraries(arrayTest os util gtest_main)
add_test(
  NAME arrayTest
  COMMAND arrayTest
)

# # freelistTest
# add_executable(freelistTest "")
# target_sources(freelistTest
@ -82,3 +82,138 @@ TEST(arrayTest, array_search_test) {

  taosArrayDestroy(pa);
}

// call taosArrayResize
TEST(arrayTest, array_data_correct) {
  SArray* pa = (SArray*)taosArrayInit(1, sizeof(int32_t));
  SArray* pa1 = (SArray*)taosArrayInit(1, sizeof(int32_t));
  size_t cnt = 1000;

  for (int32_t i = 0; i < cnt; ++i) {
    taosArrayPush(pa, &i);
    int32_t v = cnt + i;
    taosArrayPush(pa1, &v);
  }
  ASSERT_EQ(taosArrayGetSize(pa), cnt);
  ASSERT_EQ(taosArrayAddBatch(pa, NULL, 0), nullptr);
  ASSERT_NE(taosArrayAddBatch(pa, taosArrayGet(pa1, 0), cnt), nullptr);

  int32_t* pv = NULL;
  for (int32_t i = 0; i < cnt*2; i++) {
    pv = (int32_t*)taosArrayGet(pa, i);
    ASSERT_EQ(*pv, i);
  }

  taosArrayDestroy(pa);
  taosArrayDestroy(pa1);
}

// free
static void arrayFree(void *param) {
  void *pItem = *(void **)param;
  if (pItem != NULL) {
    taosMemoryFree(pItem);
  }
}

// string compare
static int32_t strCompare(const void *a, const void *b) {
  const char *x = *(const char **)a;
  const char *y = *(const char **)b;

  return strcmp(x, y);
}

// int32 compare
static int int32Compare(const void* a, const void* b) {
  int32_t l = *(int32_t*)a;
  int32_t r = *(int32_t*)b;
  return l - r;
}

// no need free
TEST(arrayTest, check_duplicate_nofree) {
  // no need free item
  int32_t count = 5;
  SArray* pa = taosArrayInit(1, sizeof(int32_t));
  for (int32_t i = 1; i <= count; i++) {
    for (int32_t j = 0; j < i; j++) {
      taosArrayPush(pa, &i);
      //printf(" nofree put i=%d v=%d\n",i, i);
    }
  }

  taosArrayRemoveDuplicate(pa, int32Compare, NULL);
  printf("nofree taosArrayRemoveDuplicate size=%d\n", (int32_t)taosArrayGetSize(pa));
  ASSERT_EQ(taosArrayGetSize(pa), count);
  for (int32_t i = 1; i <= count; i++) {
    int32_t v = *(int32_t*)taosArrayGet(pa, i-1);
    //printf(" nofree get i=%d v=%d\n",i, v);
    ASSERT_EQ(v, i);
  }

  taosArrayDestroy(pa);
}

// need free
TEST(arrayTest, check_duplicate_needfree) {
  // no need free item
  int32_t count = 5;
  const char* format="hello-word-%d";
  SArray *pa = taosArrayInit(1, sizeof(char *));
  for (int32_t i = 1; i <= count; i++) {
    for (int32_t j = 0; j < i; j++) {
      char *v = (char *)taosMemoryCalloc(100, sizeof(char));
      sprintf(v, format, i);
      //printf(" needfree put i=%d v=%s\n", i, v);
      taosArrayPush(pa, &v);
    }
  }

  taosArrayRemoveDuplicate(pa, strCompare, arrayFree);
  printf("needfree taosArrayRemoveDuplicate size=%d\n", (int32_t)taosArrayGetSize(pa));
  ASSERT_EQ(taosArrayGetSize(pa), count);
  char value[100];
  for (int32_t i = 1; i <= count; i++) {
    sprintf(value, format, i);
    char * v = (char *)taosArrayGetP(pa, i - 1);
    //printf(" needfree get i=%d v=%s\n", i, v);
    ASSERT_STREQ(v, value);
  }

  taosArrayClearP(pa, taosMemoryFree);
  taosArrayDestroyP(pa, taosMemoryFree);
}

// over all
TEST(arrayTest, check_overall) {

  ASSERT_EQ(taosArrayInit(1, 0), nullptr);
  ASSERT_EQ(taosArrayGet(NULL, 1), nullptr);
  ASSERT_EQ(taosArrayGetP(NULL, 1), nullptr);
  ASSERT_EQ(taosArrayInsert(NULL, 1, NULL), nullptr);

  //ASSERT_EQ(taosArrayInit(0x10000000, 10000), nullptr);
  //ASSERT_EQ(taosArrayInit_s(10000,0x10000000-1), nullptr);

  SArray* pa = taosArrayInit(1, sizeof(uint64_t));
  ASSERT_EQ(taosArrayGet(pa, 10), nullptr);
  ASSERT_EQ(taosArrayGetLast(pa), nullptr);

  uint64_t v = 100;
  taosArrayPush(pa, &v);
  taosArrayPopFrontBatch(pa, 100);
  FDelete fnNull = NULL;
  taosArrayClearEx(pa, fnNull);
  taosArrayDestroyEx(pa, fnNull);

  int32_t count = 5;
  uint64_t list[5]= {1,2,3,4,5};

  SArray* pb = taosArrayFromList(list, count, sizeof(uint64_t));
  for (int32_t i=0; i < count; i++) {
    ASSERT_EQ(*(uint64_t*)taosArrayGet(pb, i), list[i]);
  }
  taosArrayDestroy(pb);
}
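The two `check_duplicate_*` cases push runs of repeated values (one 1, two 2s, three 3s, ...) and expect `taosArrayRemoveDuplicate` to collapse each run to a single element, freeing the dropped heap items through the callback in the `needfree` variant. A small Python illustration of that expected behaviour (a model of the semantics the tests assert, not the C implementation):

```python
def remove_duplicate(items, cmp, on_drop=None):
    """Keep the first element of each run of equal neighbours; hand every
    dropped element to on_drop, like the arrayFree callback in the test."""
    out = []
    for item in items:
        if out and cmp(out[-1], item) == 0:
            if on_drop:
                on_drop(item)
            continue
        out.append(item)
    return out

data = [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5]
print(remove_duplicate(data, lambda a, b: a - b))   # [1, 2, 3, 4, 5]
```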
@ -217,6 +217,7 @@ void testFlushAndReadBackBuffer() {
  pPg = (SFilePage*)getBufPage(pBuf, pageId);
  ASSERT_TRUE(checkBufVarData(pPg, rowData + 3, len));
  destroyDiskbasedBuf(pBuf);
  taosMemoryFree(rowData);
}

} // namespace
@ -204,6 +204,11 @@ TEST(utilTest, char_pattern_match_test) {
  const char* str12 = NULL;
  ret = patternMatch(pattern12, 4, str12, 0, &pInfo);
  ASSERT_EQ(ret, TSDB_PATTERN_NOMATCH);

  const char* pattern13 = "a\\%c";
  const char* str13 = "a%c";
  ret = patternMatch(pattern13, 5, str13, strlen(str13), &pInfo);
  ASSERT_EQ(ret, TSDB_PATTERN_MATCH);
}

TEST(utilTest, char_pattern_match_no_terminated) {
@ -1,4 +1,4 @@
#ADD_SUBDIRECTORY(examples/c)
ADD_SUBDIRECTORY(tsim)
ADD_SUBDIRECTORY(test/c)
#ADD_SUBDIRECTORY(comparisonTest/tdengine)

if(${BUILD_TEST})
  add_subdirectory(taosc_test)
endif(${BUILD_TEST})
@ -0,0 +1,40 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import time

import taos
from frame.log import *
from frame.cases import *
from frame.sql import *

class TDTestCase:
    # init
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), True)

    # run
    def run(self):
        # check two db query result same
        tdLog.info(f"hello world.")

    # stop
    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,40 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import time

import taos
from frame.log import *
from frame.cases import *
from frame.sql import *

class TDTestCase:
    # init
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), True)

    # run
    def run(self):
        # check two db query result same
        tdLog.info(f"hello world.")

    # stop
    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
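Both skeleton cases rely on the same contract as the rest of this commit's test framework: a runner instantiates `TDTestCase`, calls `init(conn, logSql, replicaVar)`, then `run()`, then `stop()`, and `tdCases.addLinux`/`addWindows` only registers the instance for that runner. A hedged sketch of driving one case by hand under those assumptions (a locally running taosd and the `frame` package importable):

```python
import taos                           # TDengine Python connector

conn = taos.connect()                 # assumes a local taosd with default config
case = TDTestCase()                   # the skeleton class defined in the file above
case.init(conn, True, replicaVar=1)   # same call order the frame runner uses
try:
    case.run()
finally:
    case.stop()
```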
@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-

import sys
from frame.log import *
from frame.cases import *
from frame.sql import *
import threading
import random
import string
import time

#
# Auto Gen class
#
class AutoGen:
    def __init__(self):
        self.ts = 1600000000000
        self.batch_size = 100
        seed = time.time() % 10000
        random.seed(seed)

    # set start ts
    def set_start_ts(self, ts):
        self.ts = ts

    # set batch size
    def set_batch_size(self, batch_size):
        self.batch_size = batch_size

    # _columns_sql
    def gen_columns_sql(self, pre, cnt, binary_len, nchar_len):
        types = [
            'timestamp',
            'tinyint',
            'smallint',
            'tinyint unsigned',
            'smallint unsigned',
            'int',
            'bigint',
            'int unsigned',
            'bigint unsigned',
            'float',
            'double',
            'bool',
            f'varchar({binary_len})',
            f'nchar({nchar_len})'
        ]

        sqls = ""
        metas = []
        for i in range(cnt):
            colname = f"{pre}{i}"
            sel = i % len(types)
            coltype = types[sel]
            sql = f"{colname} {coltype}"
            if sqls != "":
                sqls += ","
            sqls += sql
            metas.append(sel)

        return metas, sqls;

    # gen tags data
    def gen_data(self, i, marr):
        datas = ""
        for c in marr:
            data = ""
            if c == 0 : # timestamp
                data = "%d" % (self.ts + i)
            elif c <= 4 : # small
                data = "%d"%(i%128)
            elif c <= 8 : # int
                data = f"{i}"
            elif c <= 10 : # float
                data = "%f"%(i+i/1000)
            elif c <= 11 : # bool
                data = "%d"%(i%2)
            elif c == 12 : # binary
                data = '"' + self.random_string(self.bin_len) + '"'
            elif c == 13 : # binary
                data = '"' + self.random_string(self.nch_len) + '"'

            if datas != "":
                datas += ","
            datas += data

        return datas

    # generate specail wide random string
    def random_string(self, count):
        letters = string.ascii_letters
        return ''.join(random.choice(letters) for i in range(count))

    # create db
    def create_db(self, dbname, vgroups = 2, replica = 1):
        self.dbname = dbname
        tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}')
        tdSql.execute(f'use {dbname}')

    # create table or stable
    def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len):
        self.bin_len = binary_len
        self.nch_len = nchar_len
        self.stbname = stbname
        self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len)
        self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len)

        sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})"
        tdSql.execute(sql)

    # create child table
    def create_child(self, stbname, prename, cnt):
        self.child_cnt = cnt
        self.child_name = prename
        for i in range(cnt):
            tags_data = self.gen_data(i, self.mtags)
            sql = f"create table {prename}{i} using {stbname} tags({tags_data})"
            tdSql.execute(sql)

        tdLog.info(f"create child tables {cnt} ok")

    def insert_data_child(self, child_name, cnt, batch_size, step):
        values = ""
        print("insert child data")
        ts = self.ts

        # loop do
        for i in range(cnt):
            value = self.gen_data(i, self.mcols)
            ts += step
            values += f"({ts},{value}) "
            if batch_size == 1 or (i > 0 and i % batch_size == 0) :
                sql = f"insert into {child_name} values {values}"
                tdSql.execute(sql)
                values = ""

        # end batch
        if values != "":
            sql = f"insert into {child_name} values {values}"
            tdSql.execute(sql)
            tdLog.info(f" insert data i={i}")
            values = ""

        tdLog.info(f" insert child data {child_name} finished, insert rows={cnt}")

    # insert data
    def insert_data(self, cnt):
        for i in range(self.child_cnt):
            name = f"{self.child_name}{i}"
            self.insert_data_child(name, cnt, self.batch_size, 1)

        tdLog.info(f" insert data ok, child table={self.child_cnt} insert rows={cnt}")

    # insert same timestamp to all childs
    def insert_samets(self, cnt):
        for i in range(self.child_cnt):
            name = f"{self.child_name}{i}"
            self.insert_data_child(name, cnt, self.batch_size, 0)

        tdLog.info(f" insert same timestamp ok, child table={self.child_cnt} insert rows={cnt}")
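`AutoGen` chains the helpers above: `gen_columns_sql` builds the column/tag DDL plus a list of type indexes, `gen_data` turns those indexes into value literals, and the `create_*`/`insert_*` methods push everything through `tdSql`. A hedged usage sketch, assuming the case's `init()` has already set up `tdSql` and that the class lives in a `frame.autogen` module (the database and table names below are made up for the example):

```python
from frame.autogen import AutoGen      # assumed module path for the class above

gen = AutoGen()
gen.set_batch_size(500)
gen.create_db("db", vgroups=2, replica=1)             # create database db ... ; use db
gen.create_stable("stb", tag_cnt=4, column_cnt=10,
                  binary_len=32, nchar_len=16)         # generated schema for the super table
gen.create_child("stb", "d", cnt=5)                    # child tables d0 .. d4
gen.insert_data(1000)                                  # 1000 rows per child, batched inserts
```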
@ -0,0 +1,44 @@
class DataBoundary:
    def __init__(self):
        self.TINYINT_BOUNDARY = [-128, 127]
        self.SMALLINT_BOUNDARY = [-32768, 32767]
        self.INT_BOUNDARY = [-2147483648, 2147483647]
        self.BIGINT_BOUNDARY = [-9223372036854775808, 9223372036854775807]
        self.UTINYINT_BOUNDARY = [0, 255]
        self.USMALLINT_BOUNDARY = [0, 65535]
        self.UINT_BOUNDARY = [0, 4294967295]
        self.UBIGINT_BOUNDARY = [0, 18446744073709551615]
        self.FLOAT_BOUNDARY = [-3.40E+38, 3.40E+38]
        self.DOUBLE_BOUNDARY = [-1.7e+308, 1.7e+308]
        self.BOOL_BOUNDARY = [True, False]
        self.BINARY_MAX_LENGTH = 16374
        self.NCHAR_MAX_LENGTH = 4093
        self.DBNAME_MAX_LENGTH = 64
        self.STBNAME_MAX_LENGTH = 192
        self.TBNAME_MAX_LENGTH = 192
        self.CHILD_TBNAME_MAX_LENGTH = 192
        self.TAG_KEY_MAX_LENGTH = 64
        self.COL_KEY_MAX_LENGTH = 64
        self.MAX_TAG_COUNT = 128
        self.MAX_TAG_COL_COUNT = 4096
        self.mnodeShmSize = [6292480, 2147483647]
        self.mnodeShmSize_default = 6292480
        self.vnodeShmSize = [6292480, 2147483647]
        self.vnodeShmSize_default = 31458304
        self.DB_PARAM_BUFFER_CONFIG = {"create_name": "buffer", "query_name": "buffer", "vnode_json_key": "szBuf", "boundary": [3, 16384], "default": 96}
        self.DB_PARAM_CACHELAST_CONFIG = {"create_name": "cachelast", "query_name": "cache_model", "vnode_json_key": "", "boundary": [0, 1, 2, 3], "default": 0}
        self.DB_PARAM_COMP_CONFIG = {"create_name": "comp", "query_name": "compression", "vnode_json_key": "", "boundary": [0, 1, 2], "default": 2}
        self.DB_PARAM_DURATION_CONFIG = {"create_name": "duration", "query_name": "duration", "vnode_json_key": "daysPerFile", "boundary": [1, 3650, '60m', '5256000m', '1h', '87600h', '1d', '3650d'], "default": "14400m"}
        self.DB_PARAM_FSYNC_CONFIG = {"create_name": "fsync", "query_name": "fsync", "vnode_json_key": "", "boundary": [0, 180000], "default": 3000}
        self.DB_PARAM_KEEP_CONFIG = {"create_name": "keep", "query_name": "fsync", "vnode_json_key": "", "boundary": [1, 365000,'1440m','525600000m','24h','8760000h','1d','365000d'], "default": "5256000m,5256000m,5256000m"}
        self.DB_PARAM_MAXROWS_CONFIG = {"create_name": "maxrows", "query_name": "maxrows", "vnode_json_key": "maxRows", "boundary": [200, 10000], "default": 4096}
        self.DB_PARAM_MINROWS_CONFIG = {"create_name": "minrows", "query_name": "minrows", "vnode_json_key": "minRows", "boundary": [10, 1000], "default": 100}
        self.DB_PARAM_NTABLES_CONFIG = {"create_name": "ntables", "query_name": "ntables", "vnode_json_key": "", "boundary": 0, "default": 0}
        self.DB_PARAM_PAGES_CONFIG = {"create_name": "pages", "query_name": "pages", "vnode_json_key": "szCache", "boundary": [64], "default": 256}
        self.DB_PARAM_PAGESIZE_CONFIG = {"create_name": "pagesize", "query_name": "pagesize", "vnode_json_key": "szPage", "boundary": [1, 16384], "default": 4}
        self.DB_PARAM_PRECISION_CONFIG = {"create_name": "precision", "query_name": "precision", "vnode_json_key": "", "boundary": ['ms', 'us', 'ns'], "default": "ms"}
        self.DB_PARAM_REPLICA_CONFIG = {"create_name": "replica", "query_name": "replica", "vnode_json_key": "", "boundary": [1], "default": 1}
        self.DB_PARAM_SINGLE_STABLE_CONFIG = {"create_name": "single_stable", "query_name": "single_stable_model", "vnode_json_key": "", "boundary": [0, 1], "default": 0}
        self.DB_PARAM_STRICT_CONFIG = {"create_name": "strict", "query_name": "strict", "vnode_json_key": "", "boundary": {"off": 0, "strict": 1}, "default": "off"}
        self.DB_PARAM_VGROUPS_CONFIG = {"create_name": "vgroups", "query_name": "vgroups", "vnode_json_key": "", "boundary": [1, 32], "default": 2}
        self.DB_PARAM_WAL_CONFIG = {"create_name": "wal", "query_name": "wal", "vnode_json_key": "", "boundary": [1, 2], "default": 1}
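`DataBoundary` centralises the numeric and schema limits so boundary cases can look them up instead of hard-coding values; the `DB_PARAM_*` entries additionally pair each database option with its SQL name, JSON key and valid range. A small hedged sketch of reading those limits (the target table and column are invented for the example, and `tdSql` is assumed to be initialised):

```python
bounds = DataBoundary()

low, high = bounds.INT_BOUNDARY          # [-2147483648, 2147483647]
for value in (low, high):                # both extremes are expected to be accepted
    tdSql.execute(f"insert into db.t_boundary(ts, c_int) values(now, {value})")

vg_cfg = bounds.DB_PARAM_VGROUPS_CONFIG  # option metadata: create name, boundary, default
tdSql.execute(f"create database db_vg vgroups {vg_cfg['boundary'][1]}")   # upper bound: 32
```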
@ -0,0 +1,150 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import inspect
|
||||
import importlib
|
||||
import traceback
|
||||
from frame.log import *
|
||||
|
||||
|
||||
class TDCase:
|
||||
def __init__(self, name, case):
|
||||
self.name = name
|
||||
self.case = case
|
||||
self._logSql = True
|
||||
|
||||
|
||||
class TDCases:
|
||||
def __init__(self):
|
||||
self.linuxCases = []
|
||||
self.windowsCases = []
|
||||
self.clusterCases = []
|
||||
|
||||
def __dynamicLoadModule(self, fileName):
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
return importlib.import_module(moduleName, package='..')
|
||||
|
||||
def logSql(self, logSql):
|
||||
self._logSql = logSql
|
||||
|
||||
def addWindows(self, name, case):
|
||||
self.windowsCases.append(TDCase(name, case))
|
||||
|
||||
def addLinux(self, name, case):
|
||||
self.linuxCases.append(TDCase(name, case))
|
||||
|
||||
def addCluster(self, name, case):
|
||||
self.clusterCases.append(TDCase(name, case))
|
||||
|
||||
def runAllLinux(self, conn):
|
||||
# TODO: load all Linux cases here
|
||||
runNum = 0
|
||||
for tmp in self.linuxCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
case.init(conn)
|
||||
case.run()
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
|
||||
tdLog.info("total %d Linux test case(s) executed" % (runNum))
|
||||
|
||||
def runOneLinux(self, conn, fileName, replicaVar=1):
|
||||
testModule = self.__dynamicLoadModule(fileName)
|
||||
|
||||
runNum = 0
|
||||
for tmp in self.linuxCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
case.init(conn, self._logSql, replicaVar)
|
||||
try:
|
||||
case.run()
|
||||
except Exception as e:
|
||||
tdLog.notice(repr(e))
|
||||
traceback.print_exc()
|
||||
tdLog.exit("%s failed" % (fileName))
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
|
||||
def runAllWindows(self, conn):
|
||||
# TODO: load all Windows cases here
|
||||
runNum = 0
|
||||
for tmp in self.windowsCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
case.init(conn)
|
||||
case.run()
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
|
||||
tdLog.notice("total %d Windows test case(s) executed" % (runNum))
|
||||
|
||||
def runOneWindows(self, conn, fileName, replicaVar=1):
|
||||
testModule = self.__dynamicLoadModule(fileName)
|
||||
|
||||
runNum = 0
|
||||
for tmp in self.windowsCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
case = testModule.TDTestCase()
|
||||
case.init(conn, self._logSql,replicaVar)
|
||||
try:
|
||||
case.run()
|
||||
except Exception as e:
|
||||
tdLog.notice(repr(e))
|
||||
tdLog.exit("%s failed" % (fileName))
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
tdLog.notice("total %d Windows case(s) executed" % (runNum))
|
||||
|
||||
def runAllCluster(self):
|
||||
# TODO: load all cluster case module here
|
||||
|
||||
runNum = 0
|
||||
for tmp in self.clusterCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
tdLog.notice("run cases like %s" % (fileName))
|
||||
case = testModule.TDTestCase()
|
||||
case.init()
|
||||
case.run()
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
|
||||
tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
|
||||
|
||||
def runOneCluster(self, fileName):
|
||||
testModule = self.__dynamicLoadModule(fileName)
|
||||
|
||||
runNum = 0
|
||||
for tmp in self.clusterCases:
|
||||
if tmp.name.find(fileName) != -1:
|
||||
tdLog.notice("run cases like %s" % (fileName))
|
||||
case = testModule.TDTestCase()
|
||||
case.init()
|
||||
case.run()
|
||||
case.stop()
|
||||
runNum += 1
|
||||
continue
|
||||
|
||||
tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
|
||||
|
||||
|
||||
tdCases = TDCases()
|
|
@ -0,0 +1,108 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import socket
|
||||
|
||||
from frame.log import *
|
||||
from frame.sql import *
|
||||
from frame.cases import *
|
||||
from frame.dnodes import *
|
||||
from frame.common import *
|
||||
|
||||
class ClusterDnodes(TDDnodes):
|
||||
"""rewrite TDDnodes and make MyDdnodes as TDDnodes child class"""
|
||||
def __init__(self ,dnodes_lists):
|
||||
|
||||
super(ClusterDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
self.killValgrind = 1
|
||||
|
||||
|
||||
class ConfigureyCluster:
|
||||
"""This will create defined number of dnodes and create a cluster.
|
||||
at the same time, it will return TDDnodes list: dnodes, """
|
||||
hostname = socket.gethostname()
|
||||
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodeNums = 5
|
||||
self.independent = True
|
||||
self.startPort = 6030
|
||||
self.portStep = 100
|
||||
self.mnodeNums = 0
|
||||
|
||||
def configure_cluster(self ,dnodeNums=5,mnodeNums=0,independentMnode=True,startPort=6030,portStep=100,hostname="%s"%hostname):
|
||||
self.startPort=int(startPort)
|
||||
self.portStep=int(portStep)
|
||||
self.hostname=hostname
|
||||
self.dnodeNums = int(dnodeNums)
|
||||
self.mnodeNums = int(mnodeNums)
|
||||
self.dnodes = []
|
||||
startPort_sec = int(startPort+portStep)
|
||||
for num in range(1, (self.dnodeNums+1)):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{self.startPort}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{self.startPort + (num-1)*self.portStep}")
|
||||
dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
|
||||
|
||||
# configure dnoe of independent mnodes
|
||||
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
|
||||
tdLog.info(f"set mnode:{num} supportVnodes 0")
|
||||
dnode.addExtraCfg("supportVnodes", 0)
|
||||
# print(dnode)
|
||||
self.dnodes.append(dnode)
|
||||
return self.dnodes
|
||||
|
||||
def create_dnode(self,conn,dnodeNum):
|
||||
tdSql.init(conn.cursor())
|
||||
dnodeNum=int(dnodeNum)
|
||||
for dnode in self.dnodes[1:dnodeNum]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
tdSql.execute(" create dnode '%s';"%dnode_id)
|
||||
|
||||
|
||||
def create_mnode(self,conn,mnodeNums):
|
||||
tdSql.init(conn.cursor())
|
||||
mnodeNums=int(mnodeNums)
|
||||
for i in range(2,mnodeNums+1):
|
||||
tdLog.info("create mnode on dnode %d"%i)
|
||||
tdSql.execute(" create mnode on dnode %d;"%i)
|
||||
|
||||
|
||||
|
||||
def check_dnode(self,conn):
|
||||
tdSql.init(conn.cursor())
|
||||
count=0
|
||||
while count < 5:
|
||||
tdSql.query("select * from information_schema.ins_dnodes")
|
||||
# tdLog.debug(tdSql.queryResult)
|
||||
status=0
|
||||
for i in range(self.dnodeNums):
|
||||
if tdSql.queryResult[i][4] == "ready":
|
||||
status+=1
|
||||
# tdLog.debug(status)
|
||||
|
||||
if status == self.dnodeNums:
|
||||
tdLog.debug(" create cluster with %d dnode and check cluster dnode all ready within 5s! " %self.dnodeNums)
|
||||
break
|
||||
count+=1
|
||||
time.sleep(1)
|
||||
else:
|
||||
tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums)
|
||||
|
||||
def checkConnectStatus(self,dnodeNo,hostname=hostname):
|
||||
dnodeNo = int(dnodeNo)
|
||||
tdLog.info("check dnode-%d connection"%(dnodeNo+1))
|
||||
hostname = socket.gethostname()
|
||||
port = 6030 + dnodeNo*100
|
||||
connectToDnode = tdCom.newcon(host=hostname,port=port)
|
||||
return connectToDnode
|
||||
|
||||
cluster = ConfigureyCluster()
|
File diff suppressed because it is too large
|
@ -0,0 +1,197 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# basic type
|
||||
TAOS_DATA_TYPE = [
|
||||
"INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED",
|
||||
"FLOAT", "DOUBLE",
|
||||
"BOOL",
|
||||
"BINARY", "NCHAR", "VARCHAR",
|
||||
"TIMESTAMP",
|
||||
# "MEDIUMBLOB", "BLOB", # add in 3.x
|
||||
# "DECIMAL", "NUMERIC", # add in 3.x
|
||||
"JSON", # only for tag
|
||||
]
|
||||
|
||||
TAOS_NUM_TYPE = [
|
||||
"INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED", "FLOAT", "DOUBLE",
|
||||
# "DECIMAL", "NUMERIC", # add in 3.x
|
||||
]
|
||||
TAOS_CHAR_TYPE = [
|
||||
"BINARY", "NCHAR", "VARCHAR",
|
||||
]
|
||||
TAOS_BOOL_TYPE = ["BOOL",]
|
||||
TAOS_TS_TYPE = ["TIMESTAMP",]
|
||||
TAOS_BIN_TYPE = [
|
||||
"MEDIUMBLOB", "BLOB", # add in 3.x
|
||||
]
|
||||
|
||||
TAOS_TIME_INIT = ["b", "u", "a", "s", "m", "h", "d", "w", "n", "y"]
|
||||
TAOS_PRECISION = ["ms", "us", "ns"]
|
||||
PRECISION_DEFAULT = "ms"
|
||||
PRECISION = PRECISION_DEFAULT
|
||||
|
||||
TAOS_KEYWORDS = [
|
||||
"ABORT", "CREATE", "IGNORE", "NULL", "STAR",
|
||||
"ACCOUNT", "CTIME", "IMMEDIATE", "OF", "STATE",
|
||||
"ACCOUNTS", "DATABASE", "IMPORT", "OFFSET", "STATEMENT",
|
||||
"ADD", "DATABASES", "IN", "OR", "STATE_WINDOW",
|
||||
"AFTER", "DAYS", "INITIALLY", "ORDER", "STORAGE",
|
||||
"ALL", "DBS", "INSERT", "PARTITIONS", "STREAM",
|
||||
"ALTER", "DEFERRED", "INSTEAD", "PASS", "STREAMS",
|
||||
"AND", "DELIMITERS", "INT", "PLUS", "STRING",
|
||||
"AS", "DESC", "INTEGER", "PPS", "SYNCDB",
|
||||
"ASC", "DESCRIBE", "INTERVAL", "PRECISION", "TABLE",
|
||||
"ATTACH", "DETACH", "INTO", "PREV", "TABLES",
|
||||
"BEFORE", "DISTINCT", "IS", "PRIVILEGE", "TAG",
|
||||
"BEGIN", "DIVIDE", "ISNULL", "QTIME", "TAGS",
|
||||
"BETWEEN", "DNODE", "JOIN", "QUERIES", "TBNAME",
|
||||
"BIGINT", "DNODES", "KEEP", "QUERY", "TIMES",
|
||||
"BINARY", "DOT", "KEY", "QUORUM", "TIMESTAMP",
|
||||
"BITAND", "DOUBLE", "KILL", "RAISE", "TINYINT",
|
||||
"BITNOT", "DROP", "LE", "REM", "TOPIC",
|
||||
"BITOR", "EACH", "LIKE", "REPLACE", "TOPICS",
|
||||
"BLOCKS", "END", "LIMIT", "REPLICA", "TRIGGER",
|
||||
"BOOL", "EQ", "LINEAR", "RESET", "TSERIES",
|
||||
"BY", "EXISTS", "LOCAL", "RESTRICT", "UMINUS",
|
||||
"CACHE", "EXPLAIN", "LP", "ROW", "UNION",
|
||||
"CACHEMODEL", "FAIL", "LSHIFT", "RP", "UNSIGNED",
|
||||
"CASCADE", "FILE", "LT", "RSHIFT", "UPDATE",
|
||||
"CHANGE", "FILL", "MATCH", "SCORES", "UPLUS",
|
||||
"CLUSTER", "FLOAT", "MAXROWS", "SELECT", "USE",
|
||||
"COLON", "FOR", "MINROWS", "SEMI", "USER",
|
||||
"COLUMN", "FROM", "MINUS", "SESSION", "USERS",
|
||||
"COMMA", "FSYNC", "MNODES", "SET", "USING",
|
||||
"COMP", "GE", "MODIFY", "SHOW", "VALUES",
|
||||
"COMPACT", "GLOB", "MODULES", "SLASH", "VARIABLE",
|
||||
"CONCAT", "GRANTS", "NCHAR", "SLIDING", "VARIABLES",
|
||||
"CONFLICT", "GROUP", "NE", "SLIMIT", "VGROUPS",
|
||||
"CONNECTION", "GT", "NONE", "SMALLINT", "VIEW",
|
||||
"CONNECTIONS", "HAVING", "NOT", "SOFFSET", "VNODES",
|
||||
"CONNS", "ID", "NOTNULL", "STABLE", "WAL",
|
||||
"COPY", "IF", "NOW", "STABLES", "WHERE",
|
||||
]
|
||||
|
||||
NUM_FUNC = [
|
||||
"ABS", "ACOS", "ASIN", "ATAN", "CEIL", "COS", "FLOOR", "LOG", "POW", "ROUND", "SIN", "SQRT", "TAN",
|
||||
]
|
||||
|
||||
STR_FUNC = [
|
||||
"CHAR_LENGTH", "CONCAT", "CONCAT_WS", "LENGTH", "LOWER","LTRIM", "RTRIM", "SUBSTR", "UPPER",
|
||||
]
|
||||
|
||||
CONVER_FUNC = ["CASR", "TO_ISO8601", "TO_JSON", "TP_UNIXTIMESTAMP"]
|
||||
|
||||
SELECT_FUNC = [
|
||||
"APERCENTILE", "BOTTOM", "FIRST", "INTERP", "LAST", "MAX", "MIN", "PERCENTILE", "TAIL", "TOP", "UNIQUE",
|
||||
]
|
||||
|
||||
AGG_FUNC = [
|
||||
"AVG", "COUNT", "ELAPSED", "LEASTSQUARES", "MODE", "SPREAD", "STDDEV", "SUM", "HYPERLOGLOG", "HISTOGRAM",
|
||||
]
|
||||
|
||||
TS_FUNC = [
|
||||
"CSUM", "DERIVATIVE", "DIFF", "IRATE", "MAVG", "SAMPLE", "STATECOUNT", "STATEDURATION", "TWA"
|
||||
]
|
||||
|
||||
SYSINFO_FUNC = [
|
||||
"DATABASE", "CLIENT_VERSION", "SERVER_VERSION", "SERVER_STATUS", "CURRENT_USER", "USER"
|
||||
]
|
||||
|
||||
|
||||
|
||||
# basic data type boundary
|
||||
TINYINT_MAX = 127
|
||||
TINYINT_MIN = -128
|
||||
|
||||
TINYINT_UN_MAX = 255
|
||||
TINYINT_UN_MIN = 0
|
||||
|
||||
SMALLINT_MAX = 32767
|
||||
SMALLINT_MIN = -32768
|
||||
|
||||
SMALLINT_UN_MAX = 65535
|
||||
SMALLINT_UN_MIN = 0
|
||||
|
||||
INT_MAX = 2_147_483_647
|
||||
INT_MIN = -2_147_483_648
|
||||
|
||||
INT_UN_MAX = 4_294_967_295
|
||||
INT_UN_MIN = 0
|
||||
|
||||
BIGINT_MAX = 9_223_372_036_854_775_807
|
||||
BIGINT_MIN = -9_223_372_036_854_775_808
|
||||
|
||||
BIGINT_UN_MAX = 18_446_744_073_709_551_615
|
||||
BIGINT_UN_MIN = 0
|
||||
|
||||
FLOAT_MAX = 3.40E+38
|
||||
FLOAT_MIN = -3.40E+38
|
||||
|
||||
DOUBLE_MAX = 1.7E+308
|
||||
DOUBLE_MIN = -1.7E+308
|
||||
|
||||
# schema boundary
|
||||
BINARY_LENGTH_MAX = 16374
|
||||
NCAHR_LENGTH_MAX = 4093
|
||||
DBNAME_LENGTH_MAX = 64
|
||||
|
||||
STBNAME_LENGTH_MAX = 192
|
||||
STBNAME_LENGTH_MIN = 1
|
||||
|
||||
TBNAME_LENGTH_MAX = 192
|
||||
TBNAME_LENGTH_MIN = 1
|
||||
|
||||
CHILD_TBNAME_LENGTH_MAX = 192
|
||||
CHILD_TBNAME_LENGTH_MIN = 1
|
||||
|
||||
TAG_NAME_LENGTH_MAX = 64
|
||||
TAG_NAME_LENGTH_MIN = 1
|
||||
|
||||
COL_NAME_LENGTH_MAX = 64
|
||||
COL_NAME_LENGTH_MIN = 1
|
||||
|
||||
TAG_COUNT_MAX = 128
|
||||
TAG_COUNT_MIN = 1
|
||||
|
||||
COL_COUNT_MAX = 4096
|
||||
COL_COUNT_MIN = 2
|
||||
|
||||
TAG_COL_COUNT_MAX = 4096
|
||||
TAG_COL_COUNT_MIN = 3
|
||||
|
||||
MNODE_SHM_SIZE_MAX = 2_147_483_647
|
||||
MNODE_SHM_SIZE_MIN = 6_292_480
|
||||
MNODE_SHM_SIZE_DEFAULT = 6_292_480
|
||||
|
||||
VNODE_SHM_SIZE_MAX = 2_147_483_647
|
||||
VNODE_SHM_SIZE_MIN = 6_292_480
|
||||
VNODE_SHM_SIZE_DEFAULT = 31_458_304
|
||||
|
||||
# time_init
|
||||
TIME_MS = 1
|
||||
TIME_US = TIME_MS/1000
|
||||
TIME_NS = TIME_US/1000
|
||||
|
||||
TIME_S = 1000 * TIME_MS
|
||||
TIME_M = 60 * TIME_S
|
||||
TIME_H = 60 * TIME_M
|
||||
TIME_D = 24 * TIME_H
|
||||
TIME_W = 7 * TIME_D
|
||||
TIME_N = 30 * TIME_D
|
||||
TIME_Y = 365 * TIME_D
|
||||
|
||||
|
||||
# session parameters
|
||||
INTERVAL_MIN = 1 * TIME_MS if PRECISION == PRECISION_DEFAULT else 1 * TIME_US
|
||||
|
||||
|
||||
# streams and related agg-function
|
||||
SMA_INDEX_FUNCTIONS = ["MIN", "MAX"]
|
||||
ROLLUP_FUNCTIONS = ["AVG", "SUM", "MIN", "MAX", "LAST", "FIRST"]
|
||||
BLOCK_FUNCTIONS = ["SUM", "MIN", "MAX"]
|
||||
SMA_WATMARK_MAXDELAY_INIT = ['a', "s", "m"]
|
||||
WATERMARK_MAX = 900000
|
||||
WATERMARK_MIN = 0
|
||||
|
||||
MAX_DELAY_MAX = 900000
|
||||
MAX_DELAY_MIN = 1
|
|
@ -0,0 +1,502 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
from frame.log import *
|
||||
|
||||
|
||||
class TDSimClient:
|
||||
def __init__(self):
|
||||
self.testCluster = False
|
||||
|
||||
self.cfgDict = {
|
||||
"numOfLogLines": "100000000",
|
||||
"numOfThreadsPerCore": "2.0",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"minTablesPerVnode": "4",
|
||||
"maxTablesPerVnode": "1000",
|
||||
"tableIncStepPerVnode": "10000",
|
||||
"maxVgroupsPerDb": "1000",
|
||||
"sdbDebugFlag": "143",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"udebugFlag": "135",
|
||||
"jnidebugFlag": "135",
|
||||
"qdebugFlag": "135",
|
||||
"telemetryReporting": "0",
|
||||
}
|
||||
def init(self, path):
|
||||
self.__init__()
|
||||
self.path = path
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
class TDDnode:
|
||||
def __init__(self, index):
|
||||
self.index = index
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
|
||||
def init(self, path):
|
||||
self.path = path
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.startIP()
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("dataDir", self.dataDir)
|
||||
self.cfg("logDir", self.logDir)
|
||||
self.cfg("numOfLogLines", "100000000")
|
||||
self.cfg("mnodeEqualVnodeNum", "0")
|
||||
self.cfg("walLevel", "2")
|
||||
self.cfg("fsync", "1000")
|
||||
self.cfg("statusInterval", "1")
|
||||
self.cfg("numOfMnodes", "3")
|
||||
self.cfg("numOfThreadsPerCore", "2.0")
|
||||
self.cfg("monitor", "0")
|
||||
self.cfg("maxVnodeConnections", "30000")
|
||||
self.cfg("maxMgmtConnections", "30000")
|
||||
self.cfg("maxMeterConnections", "30000")
|
||||
self.cfg("maxShellConns", "30000")
|
||||
self.cfg("locale", "en_US.UTF-8")
|
||||
self.cfg("charset", "UTF-8")
|
||||
self.cfg("asyncLog", "0")
|
||||
self.cfg("anyIp", "0")
|
||||
self.cfg("dDebugFlag", "135")
|
||||
self.cfg("mDebugFlag", "135")
|
||||
self.cfg("sdbDebugFlag", "135")
|
||||
self.cfg("rpcDebugFlag", "135")
|
||||
self.cfg("tmrDebugFlag", "131")
|
||||
self.cfg("cDebugFlag", "135")
|
||||
self.cfg("httpDebugFlag", "135")
|
||||
self.cfg("monitorDebugFlag", "135")
|
||||
self.cfg("udebugFlag", "135")
|
||||
self.cfg("jnidebugFlag", "135")
|
||||
self.cfg("qdebugFlag", "135")
|
||||
self.deployed = 1
|
||||
tdLog.debug(
|
||||
"dnode:%d is deployed and configured by %s" %
|
||||
(self.index, self.cfgPath))
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def start(self):
|
||||
buildPath = self.getBuildPath()
|
||||
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
binPath = buildPath + "/build/bin/taosd"
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||
time.sleep(5)
|
||||
|
||||
def stop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||
|
||||
def startIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def stopIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||
self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def getDnodeRootDir(self, index):
|
||||
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||
return dnodeRootDir
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||
return dnodesRootDir
|
||||
|
||||
|
||||
class TDDnodes:
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodes.append(TDDnode(1))
|
||||
self.dnodes.append(TDDnode(2))
|
||||
self.dnodes.append(TDDnode(3))
|
||||
self.dnodes.append(TDDnode(4))
|
||||
self.dnodes.append(TDDnode(5))
|
||||
self.dnodes.append(TDDnode(6))
|
||||
self.dnodes.append(TDDnode(7))
|
||||
self.dnodes.append(TDDnode(8))
|
||||
self.dnodes.append(TDDnode(9))
|
||||
self.dnodes.append(TDDnode(10))
|
||||
self.simDeployed = False
|
||||
|
||||
def init(self, path):
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
binPath = os.path.dirname(os.path.realpath(__file__))
|
||||
binPath = binPath + "/../../../debug/"
|
||||
tdLog.debug("binPath %s" % (binPath))
|
||||
binPath = os.path.realpath(binPath)
|
||||
tdLog.debug("binPath real path %s" % (binPath))
|
||||
|
||||
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
|
||||
# tdLog.debug(cmd)
|
||||
# os.system(cmd)
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
if path == "":
|
||||
# self.path = os.path.expanduser('~')
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path)
|
||||
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def deploy(self, index):
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
self.sim.deploy()
|
||||
self.simDeployed = True
|
||||
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].deploy()
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].cfg(option, value)
|
||||
|
||||
def start(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].start()
|
||||
|
||||
def stop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
||||
def startIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.testCluster:
|
||||
self.dnodes[index - 1].startIP()
|
||||
|
||||
def stopIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.dnodes[index - 1].testCluster:
|
||||
self.dnodes[index - 1].stopIP()
|
||||
|
||||
def check(self, index):
|
||||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes")
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].stop()
|
||||
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
if processID:
|
||||
cmd = "sudo systemctl stop taosd"
|
||||
os.system(cmd)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
|
||||
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,500 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
from frame.log import *
|
||||
|
||||
|
||||
class TDSimClient:
|
||||
def __init__(self):
|
||||
self.testCluster = False
|
||||
|
||||
self.cfgDict = {
|
||||
"numOfLogLines": "100000000",
|
||||
"numOfThreadsPerCore": "2.0",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"anyIp": "0",
|
||||
"sdbDebugFlag": "135",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"udebugFlag": "135",
|
||||
"jnidebugFlag": "135",
|
||||
"qdebugFlag": "135",
|
||||
"telemetryReporting": "0",
|
||||
}
|
||||
|
||||
def init(self, path):
|
||||
self.__init__()
|
||||
self.path = path
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
class TDDnode:
|
||||
def __init__(self, index):
|
||||
self.index = index
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
|
||||
def init(self, path):
|
||||
self.path = path
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.startIP()
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("dataDir", self.dataDir)
|
||||
self.cfg("logDir", self.logDir)
|
||||
self.cfg("numOfLogLines", "100000000")
|
||||
self.cfg("mnodeEqualVnodeNum", "0")
|
||||
self.cfg("walLevel", "2")
|
||||
self.cfg("fsync", "1000")
|
||||
self.cfg("statusInterval", "1")
|
||||
self.cfg("numOfMnodes", "3")
|
||||
self.cfg("numOfThreadsPerCore", "2.0")
|
||||
self.cfg("monitor", "0")
|
||||
self.cfg("maxVnodeConnections", "30000")
|
||||
self.cfg("maxMgmtConnections", "30000")
|
||||
self.cfg("maxMeterConnections", "30000")
|
||||
self.cfg("maxShellConns", "30000")
|
||||
self.cfg("locale", "en_US.UTF-8")
|
||||
self.cfg("charset", "UTF-8")
|
||||
self.cfg("asyncLog", "0")
|
||||
self.cfg("anyIp", "0")
|
||||
self.cfg("dDebugFlag", "135")
|
||||
self.cfg("mDebugFlag", "135")
|
||||
self.cfg("sdbDebugFlag", "135")
|
||||
self.cfg("rpcDebugFlag", "135")
|
||||
self.cfg("tmrDebugFlag", "131")
|
||||
self.cfg("cDebugFlag", "135")
|
||||
self.cfg("httpDebugFlag", "135")
|
||||
self.cfg("monitorDebugFlag", "135")
|
||||
self.cfg("udebugFlag", "135")
|
||||
self.cfg("jnidebugFlag", "135")
|
||||
self.cfg("qdebugFlag", "135")
|
||||
self.deployed = 1
|
||||
tdLog.debug(
|
||||
"dnode:%d is deployed and configured by %s" %
|
||||
(self.index, self.cfgPath))
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def start(self):
|
||||
buildPath = self.getBuildPath()
|
||||
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
binPath = buildPath + "/build/bin/taosd"
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||
time.sleep(5)
|
||||
|
||||
def stop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||
|
||||
def startIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def stopIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||
self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def getDnodeRootDir(self, index):
|
||||
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||
return dnodeRootDir
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||
return dnodesRootDir
|
||||
|
||||
|
||||
class TDDnodes:
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodes.append(TDDnode(1))
|
||||
self.dnodes.append(TDDnode(2))
|
||||
self.dnodes.append(TDDnode(3))
|
||||
self.dnodes.append(TDDnode(4))
|
||||
self.dnodes.append(TDDnode(5))
|
||||
self.dnodes.append(TDDnode(6))
|
||||
self.dnodes.append(TDDnode(7))
|
||||
self.dnodes.append(TDDnode(8))
|
||||
self.dnodes.append(TDDnode(9))
|
||||
self.dnodes.append(TDDnode(10))
|
||||
self.simDeployed = False
|
||||
|
||||
def init(self, path):
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
binPath = os.path.dirname(os.path.realpath(__file__))
|
||||
binPath = binPath + "/../../../debug/"
|
||||
tdLog.debug("binPath %s" % (binPath))
|
||||
binPath = os.path.realpath(binPath)
|
||||
tdLog.debug("binPath real path %s" % (binPath))
|
||||
|
||||
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
|
||||
# tdLog.debug(cmd)
|
||||
# os.system(cmd)
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
if path == "":
|
||||
# self.path = os.path.expanduser('~')
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path)
|
||||
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def deploy(self, index):
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
self.sim.deploy()
|
||||
self.simDeployed = True
|
||||
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].deploy()
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].cfg(option, value)
|
||||
|
||||
def start(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].start()
|
||||
|
||||
def stop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
||||
def startIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.testCluster:
|
||||
self.dnodes[index - 1].startIP()
|
||||
|
||||
def stopIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.dnodes[index - 1].testCluster:
|
||||
self.dnodes[index - 1].stopIP()
|
||||
|
||||
def check(self, index):
|
||||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes")
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].stop()
|
||||
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
if processID:
|
||||
cmd = "sudo systemctl stop taosd"
|
||||
os.system(cmd)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
|
||||
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,497 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
import time
from frame.log import *
|
||||
|
||||
|
||||
class TDSimClient:
|
||||
def __init__(self):
|
||||
self.testCluster = False
|
||||
|
||||
self.cfgDict = {
|
||||
"numOfLogLines": "100000000",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"udebugFlag": "135",
|
||||
"jnidebugFlag": "135",
|
||||
"qdebugFlag": "135",
|
||||
"telemetryReporting": "0",
|
||||
}
|
||||
|
||||
def init(self, path):
|
||||
self.__init__()
|
||||
self.path = path
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
class TDDnode:
|
||||
def __init__(self, index):
|
||||
self.index = index
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
|
||||
def init(self, path):
|
||||
self.path = path
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def deploy(self):
|
||||
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "mkdir -p " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.startIP()
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("dataDir", self.dataDir)
|
||||
self.cfg("logDir", self.logDir)
|
||||
self.cfg("numOfLogLines", "100000000")
|
||||
self.cfg("mnodeEqualVnodeNum", "0")
|
||||
self.cfg("walLevel", "2")
|
||||
self.cfg("fsync", "1000")
|
||||
self.cfg("statusInterval", "1")
|
||||
self.cfg("numOfMnodes", "3")
|
||||
self.cfg("numOfThreadsPerCore", "2.0")
|
||||
self.cfg("monitor", "0")
|
||||
self.cfg("maxVnodeConnections", "30000")
|
||||
self.cfg("maxMgmtConnections", "30000")
|
||||
self.cfg("maxMeterConnections", "30000")
|
||||
self.cfg("maxShellConns", "30000")
|
||||
self.cfg("locale", "en_US.UTF-8")
|
||||
self.cfg("charset", "UTF-8")
|
||||
self.cfg("asyncLog", "0")
|
||||
self.cfg("anyIp", "0")
|
||||
self.cfg("dDebugFlag", "135")
|
||||
self.cfg("mDebugFlag", "135")
|
||||
self.cfg("sdbDebugFlag", "135")
|
||||
self.cfg("rpcDebugFlag", "135")
|
||||
self.cfg("tmrDebugFlag", "131")
|
||||
self.cfg("cDebugFlag", "135")
|
||||
self.cfg("httpDebugFlag", "135")
|
||||
self.cfg("monitorDebugFlag", "135")
|
||||
self.cfg("udebugFlag", "135")
|
||||
self.cfg("jnidebugFlag", "135")
|
||||
self.cfg("qdebugFlag", "135")
|
||||
self.deployed = 1
|
||||
tdLog.debug(
|
||||
"dnode:%d is deployed and configured by %s" %
|
||||
(self.index, self.cfgPath))
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
buildPath = ""  # default when taosd cannot be located
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def start(self):
|
||||
buildPath = self.getBuildPath()
|
||||
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
binPath = buildPath + "/build/bin/taosd"
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||
time.sleep(5)
|
||||
|
||||
def stop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||
|
||||
def startIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def stopIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||
self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def getDnodeRootDir(self, index):
|
||||
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||
return dnodeRootDir
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||
return dnodesRootDir
|
||||
|
||||
|
||||
class TDDnodes:
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodes.append(TDDnode(1))
|
||||
self.dnodes.append(TDDnode(2))
|
||||
self.dnodes.append(TDDnode(3))
|
||||
self.dnodes.append(TDDnode(4))
|
||||
self.dnodes.append(TDDnode(5))
|
||||
self.dnodes.append(TDDnode(6))
|
||||
self.dnodes.append(TDDnode(7))
|
||||
self.dnodes.append(TDDnode(8))
|
||||
self.dnodes.append(TDDnode(9))
|
||||
self.dnodes.append(TDDnode(10))
|
||||
self.simDeployed = False
|
||||
|
||||
def init(self, path):
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
binPath = os.path.dirname(os.path.realpath(__file__))
|
||||
binPath = binPath + "/../../../debug/"
|
||||
tdLog.debug("binPath %s" % (binPath))
|
||||
binPath = os.path.realpath(binPath)
|
||||
tdLog.debug("binPath real path %s" % (binPath))
|
||||
|
||||
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
|
||||
# tdLog.debug(cmd)
|
||||
# os.system(cmd)
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
# tdLog.debug("execute %s" % (cmd))
|
||||
|
||||
if path == "":
|
||||
# self.path = os.path.expanduser('~')
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path)
|
||||
|
||||
self.sim = TDSimClient()
|
||||
self.sim.init(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def deploy(self, index):
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
self.sim.deploy()
|
||||
self.simDeployed = True
|
||||
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].deploy()
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].cfg(option, value)
|
||||
|
||||
def start(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].start()
|
||||
|
||||
def stop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
||||
def startIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.testCluster:
|
||||
self.dnodes[index - 1].startIP()
|
||||
|
||||
def stopIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.dnodes[index - 1].testCluster:
|
||||
self.dnodes[index - 1].stopIP()
|
||||
|
||||
def check(self, index):
|
||||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes")
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].stop()
|
||||
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
if processID:
|
||||
cmd = "sudo systemctl stop taosd"
|
||||
os.system(cmd)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||
while(processID):
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
|
||||
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,890 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import platform
|
||||
import distro
|
||||
import subprocess
import time
from time import sleep
|
||||
import base64
|
||||
import json
|
||||
import copy
|
||||
from fabric2 import Connection
|
||||
from frame.log import *
|
||||
from shutil import which
|
||||
|
||||
|
||||
class TDSimClient:
|
||||
def __init__(self, path):
|
||||
self.testCluster = False
|
||||
self.path = path
|
||||
self.cfgDict = {
|
||||
"fqdn": "localhost",
|
||||
"numOfLogLines": "100000000",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"rpcDebugFlag": "135",
|
||||
"tmrDebugFlag": "131",
|
||||
"cDebugFlag": "135",
|
||||
"uDebugFlag": "135",
|
||||
"jniDebugFlag": "135",
|
||||
"qDebugFlag": "135",
|
||||
"supportVnodes": "1024",
|
||||
"enableQueryHb": "1",
|
||||
"telemetryReporting": "0",
|
||||
"tqDebugflag": "135",
|
||||
"wDebugflag":"135",
|
||||
}
|
||||
|
||||
def getLogDir(self):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
return self.logDir
|
||||
|
||||
def getCfgDir(self):
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
return self.cfgDir
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self, *updatecfgDict):
|
||||
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
# cmd = "mkdir -p " + self.logDir
|
||||
# if os.system(cmd) != 0:
|
||||
# tdLog.exit(cmd)
|
||||
os.makedirs(self.logDir)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
# cmd = "mkdir -p " + self.cfgDir
|
||||
# if os.system(cmd) != 0:
|
||||
# tdLog.exit(cmd)
|
||||
os.makedirs(self.cfgDir)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("logDir", self.logDir)
|
||||
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
|
||||
try:
|
||||
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
|
||||
clientCfg = dict (updatecfgDict[0][0].get('clientCfg'))
|
||||
for key, value in clientCfg.items():
|
||||
self.cfg(key, value)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||
|
||||
|
||||
class TDDnode:
|
||||
def __init__(self, index):
|
||||
self.index = index
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
self.asan = False
|
||||
self.remoteIP = ""
|
||||
self.cfgDict = {
|
||||
"fqdn": "localhost",
|
||||
"monitor": "0",
|
||||
"maxShellConns": "30000",
|
||||
"locale": "en_US.UTF-8",
|
||||
"charset": "UTF-8",
|
||||
"asyncLog": "0",
|
||||
"mDebugFlag": "143",
|
||||
"dDebugFlag": "143",
|
||||
"vDebugFlag": "143",
|
||||
"tqDebugFlag": "143",
|
||||
"cDebugFlag": "143",
|
||||
"stDebugFlag": "143",
|
||||
"smaDebugFlag": "143",
|
||||
"jniDebugFlag": "143",
|
||||
"qDebugFlag": "143",
|
||||
"rpcDebugFlag": "143",
|
||||
"tmrDebugFlag": "131",
|
||||
"uDebugFlag": "135",
|
||||
"sDebugFlag": "135",
|
||||
"wDebugFlag": "135",
|
||||
"numOfLogLines": "100000000",
|
||||
"statusInterval": "1",
|
||||
"enableQueryHb": "1",
|
||||
"supportVnodes": "1024",
|
||||
"telemetryReporting": "0"
|
||||
}
|
||||
|
||||
def init(self, path, remoteIP = ""):
|
||||
self.path = path
|
||||
self.remoteIP = remoteIP
|
||||
if (not self.remoteIP == ""):
|
||||
try:
|
||||
self.config = eval(self.remoteIP)
|
||||
self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
|
||||
except Exception as r:
|
||||
print(r)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def setAsan(self, value):
|
||||
self.asan = value
|
||||
if value:
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
|
||||
else:
|
||||
self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
if (self.deployed == 1):
|
||||
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||
for f in filenames:
|
||||
fp = os.path.join(dirpath, f)
|
||||
|
||||
if not os.path.islink(fp):
|
||||
totalSize = totalSize + os.path.getsize(fp)
|
||||
|
||||
return totalSize
|
||||
|
||||
def addExtraCfg(self, option, value):
|
||||
self.cfgDict.update({option: value})
|
||||
|
||||
def remoteExec(self, updateCfgDict, execCmd):
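# Ship a config dict and a Python snippet to the remote host: both are
# base64-encoded to survive shell quoting, then replayed there by running
# ./test.py over the fabric2 connection established in init().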
|
||||
valgrindStr = ''
|
||||
if (self.valgrind==1):
|
||||
valgrindStr = '-g'
|
||||
remoteCfgDict = copy.deepcopy(updateCfgDict)
|
||||
if ("logDir" in remoteCfgDict):
|
||||
del remoteCfgDict["logDir"]
|
||||
if ("dataDir" in remoteCfgDict):
|
||||
del remoteCfgDict["dataDir"]
|
||||
if ("cfgDir" in remoteCfgDict):
|
||||
del remoteCfgDict["cfgDir"]
|
||||
remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode()
|
||||
execCmdStr = base64.b64encode(execCmd.encode()).decode()
|
||||
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
|
||||
self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr))
|
||||
|
||||
def deploy(self, *updatecfgDict):
|
||||
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||
|
||||
cmd = "rm -rf " + self.dataDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.logDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
cmd = "rm -rf " + self.cfgDir
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
# cmd = "mkdir -p " + self.dataDir
|
||||
# if os.system(cmd) != 0:
|
||||
# tdLog.exit(cmd)
|
||||
os.makedirs(self.dataDir)
|
||||
|
||||
# cmd = "mkdir -p " + self.logDir
|
||||
# if os.system(cmd) != 0:
|
||||
# tdLog.exit(cmd)
|
||||
os.makedirs(self.logDir)
|
||||
|
||||
# cmd = "mkdir -p " + self.cfgDir
|
||||
# if os.system(cmd) != 0:
|
||||
# tdLog.exit(cmd)
|
||||
os.makedirs(self.cfgDir)
|
||||
|
||||
cmd = "touch " + self.cfgPath
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
if self.testCluster:
|
||||
self.startIP()
|
||||
|
||||
if self.testCluster:
|
||||
self.cfg("masterIp", "192.168.0.1")
|
||||
self.cfg("secondIp", "192.168.0.2")
|
||||
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||
self.cfgDict["dataDir"] = self.dataDir
|
||||
self.cfgDict["logDir"] = self.logDir
|
||||
# self.cfg("dataDir",self.dataDir)
|
||||
# self.cfg("logDir",self.logDir)
|
||||
# print(updatecfgDict)
|
||||
isFirstDir = 1
|
||||
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
|
||||
for key, value in updatecfgDict[0][0].items():
|
||||
if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
|
||||
continue
|
||||
if value == 'dataDir':
|
||||
if isFirstDir:
|
||||
self.cfgDict.pop('dataDir')
|
||||
self.cfg(value, key)
|
||||
isFirstDir = 0
|
||||
else:
|
||||
self.cfg(value, key)
|
||||
else:
|
||||
self.addExtraCfg(key, value)
|
||||
if (self.remoteIP == ""):
|
||||
for key, value in self.cfgDict.items():
|
||||
self.cfg(key, value)
|
||||
else:
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index)
|
||||
|
||||
self.deployed = 1
|
||||
tdLog.debug(
|
||||
"dnode:%d is deployed and configured by %s" %
|
||||
(self.index, self.cfgPath))
|
||||
|
||||
def getPath(self, tool="taosd"):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
paths = []
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ((tool) in files or ("%s.exe"%tool) in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
paths.append(os.path.join(root, tool))
|
||||
break
|
||||
if (len(paths) == 0):
|
||||
return ""
|
||||
return paths[0]
|
||||
|
||||
def starttaosd(self):
|
||||
binPath = self.getPath()
|
||||
|
||||
if (binPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found: %s" % binPath)
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s -c %s" % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
if self.asan:
|
||||
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||
self.path, self.index)
|
||||
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||
binPath, self.cfgDir, asanDir)
|
||||
else:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s %s -c %s" % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
else:
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||
self.running = 1
|
||||
else:
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
if self.valgrind == 0:
|
||||
time.sleep(0.1)
|
||||
key1 = 'from offline to online'
|
||||
bkey1 = bytes(key1, encoding="utf8")
|
||||
key2= 'TDengine initialized successfully'
|
||||
bkey2 = bytes(key2, encoding="utf8")
|
||||
logFile = self.logDir + "/taosdlog.0"
|
||||
i = 0
|
||||
# while not os.path.exists(logFile):
|
||||
# sleep(0.1)
|
||||
# i += 1
|
||||
# if i > 10:
|
||||
# break
|
||||
# tailCmdStr = 'tail -f '
|
||||
# if platform.system().lower() == 'windows':
|
||||
# tailCmdStr = 'tail -n +0 -f '
|
||||
# popen = subprocess.Popen(
|
||||
# tailCmdStr + logFile,
|
||||
# stdout=subprocess.PIPE,
|
||||
# stderr=subprocess.PIPE,
|
||||
# shell=True)
|
||||
# pid = popen.pid
|
||||
# # print('Popen.pid:' + str(pid))
|
||||
# timeout = time.time() + 60 * 2
|
||||
# while True:
|
||||
# line = popen.stdout.readline().strip()
|
||||
# print(line)
|
||||
# if bkey1 in line:
|
||||
# popen.kill()
|
||||
# break
|
||||
# elif bkey2 in line:
|
||||
# popen.kill()
|
||||
# break
|
||||
# if time.time() > timeout:
|
||||
# print(time.time(),timeout)
|
||||
# tdLog.exit('wait too long for taosd start')
|
||||
tdLog.debug("the dnode:%d has been started." % (self.index))
|
||||
else:
|
||||
tdLog.debug(
|
||||
"wait 10 seconds for the dnode:%d to start." %
|
||||
(self.index))
|
||||
time.sleep(10)
|
||||
|
||||
def start(self):
|
||||
binPath = self.getPath()
|
||||
|
||||
if (binPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found: %s" % binPath)
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s -c %s" % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
if self.asan:
|
||||
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||
self.path, self.index)
|
||||
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||
binPath, self.cfgDir, asanDir)
|
||||
else:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s %s -c %s" % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
else:
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
|
||||
print(cmd)
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||
self.running = 1
|
||||
else:
|
||||
os.system("rm -rf %s/taosdlog.0"%self.logDir)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
if self.valgrind == 0:
|
||||
time.sleep(0.1)
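# Tail taosdlog.0 for the "from offline to online" marker so start() only
# returns once the dnode is actually ready (bounded by a ~20 second timeout).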
|
||||
key = 'from offline to online'
|
||||
bkey = bytes(key, encoding="utf8")
|
||||
logFile = self.logDir + "/taosdlog.0"
|
||||
i = 0
|
||||
while not os.path.exists(logFile):
|
||||
sleep(0.1)
|
||||
i += 1
|
||||
if i > 50:
|
||||
break
|
||||
with open(logFile) as f:
|
||||
timeout = time.time() + 10 * 2
|
||||
while True:
|
||||
line = f.readline().encode('utf-8')
|
||||
if bkey in line:
|
||||
break
|
||||
if time.time() > timeout:
|
||||
tdLog.exit('wait too long for taosd start')
|
||||
tdLog.debug("the dnode:%d has been started." % (self.index))
|
||||
else:
|
||||
tdLog.debug(
|
||||
"wait 10 seconds for the dnode:%d to start." %
|
||||
(self.index))
|
||||
time.sleep(10)
|
||||
|
||||
def startWithoutSleep(self):
|
||||
binPath = self.getPath()
|
||||
|
||||
if (binPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found: %s" % binPath)
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s -c %s" % (binPath, self.cfgDir)
|
||||
else:
|
||||
if self.asan:
|
||||
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||
self.path, self.index)
|
||||
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||
binPath, self.cfgDir, asanDir)
|
||||
else:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s %s -c %s" % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
else:
|
||||
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||
valgrindCmdline, binPath, self.cfgDir)
|
||||
print(cmd)
|
||||
|
||||
if (self.remoteIP == ""):
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
else:
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.startWithoutSleep(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||
|
||||
self.running = 1
|
||||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
def stop(self):
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||
tdLog.info("stop dnode%d"%self.index)
|
||||
return
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
|
||||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
if platform.system().lower() == 'windows':
|
||||
killCmd = "kill -INT %s > nul 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
if not platform.system().lower() == 'windows':
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d > /dev/null" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
|
||||
def stoptaosd(self):
|
||||
tdLog.debug("start to stop taosd on dnode: %d "% (self.index))
|
||||
# print(self.asan,self.running,self.remoteIP,self.valgrind)
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||
tdLog.info("stop dnode%d"%self.index)
|
||||
return
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
if platform.system().lower() == 'windows':
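# On Windows the pid is resolved via wmic, matching taosd.exe instances whose
# command line mentions this dnode's directory; elsewhere ps/grep filtered by
# dnode%d is used.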
|
||||
psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index)
|
||||
else:
|
||||
psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
|
||||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
if platform.system().lower() == 'windows':
|
||||
killCmd = "kill -INT %s > nul 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \
|
||||
(self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))
|
||||
return
|
||||
if self.valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
|
||||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
for port in range(6030, 6041):
|
||||
fuserCmd = "fuser -k -n tcp %d" % port
|
||||
os.system(fuserCmd)
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||
|
||||
def startIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def stopIP(self):
|
||||
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||
self.index, self.index)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def getDnodeRootDir(self, index):
|
||||
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||
return dnodeRootDir
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||
return dnodesRootDir
|
||||
|
||||
|
||||
class TDDnodes:
|
||||
def __init__(self):
|
||||
self.dnodes = []
|
||||
self.dnodes.append(TDDnode(1))
|
||||
self.dnodes.append(TDDnode(2))
|
||||
self.dnodes.append(TDDnode(3))
|
||||
self.dnodes.append(TDDnode(4))
|
||||
self.dnodes.append(TDDnode(5))
|
||||
self.dnodes.append(TDDnode(6))
|
||||
self.dnodes.append(TDDnode(7))
|
||||
self.dnodes.append(TDDnode(8))
|
||||
self.dnodes.append(TDDnode(9))
|
||||
self.dnodes.append(TDDnode(10))
|
||||
self.simDeployed = False
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
self.asan = False
|
||||
self.killValgrind = 0
|
||||
|
||||
def init(self, path, remoteIP = ""):
|
||||
binPath = self.dnodes[0].getPath() + "/../../../"
|
||||
# tdLog.debug("binPath %s" % (binPath))
|
||||
binPath = os.path.realpath(binPath)
|
||||
# tdLog.debug("binPath real path %s" % (binPath))
|
||||
|
||||
if path == "":
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].init(self.path, remoteIP)
|
||||
self.sim = TDSimClient(self.path)
|
||||
|
||||
def setTestCluster(self, value):
|
||||
self.testCluster = value
|
||||
|
||||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def setAsan(self, value):
|
||||
self.asan = value
|
||||
if value:
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh")
|
||||
self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh")
|
||||
else:
|
||||
self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh")
|
||||
self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh")
|
||||
tdLog.info("run in address sanitizer mode")
|
||||
|
||||
def setKillValgrind(self, value):
|
||||
self.killValgrind = value
|
||||
|
||||
def deploy(self, index, *updatecfgDict):
|
||||
self.sim.setTestCluster(self.testCluster)
|
||||
|
||||
if (self.simDeployed == False):
|
||||
self.sim.deploy(updatecfgDict)
|
||||
self.simDeployed = True
|
||||
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].setAsan(self.asan)
|
||||
self.dnodes[index - 1].deploy(updatecfgDict)
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].cfg(option, value)
|
||||
|
||||
def starttaosd(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].starttaosd()
|
||||
|
||||
def stoptaosd(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stoptaosd()
|
||||
|
||||
def start(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].start()
|
||||
|
||||
def startWithoutSleep(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].startWithoutSleep()
|
||||
|
||||
def stop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].stop()
|
||||
|
||||
def getDataSize(self, index):
|
||||
self.check(index)
|
||||
return self.dnodes[index - 1].getDataSize()
|
||||
|
||||
def forcestop(self, index):
|
||||
self.check(index)
|
||||
self.dnodes[index - 1].forcestop()
|
||||
|
||||
def startIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.testCluster:
|
||||
self.dnodes[index - 1].startIP()
|
||||
|
||||
def stopIP(self, index):
|
||||
self.check(index)
|
||||
|
||||
if self.dnodes[index - 1].testCluster:
|
||||
self.dnodes[index - 1].stopIP()
|
||||
|
||||
def check(self, index):
|
||||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def StopAllSigint(self):
|
||||
tdLog.info("stop all dnodes sigint, asan:%d" % self.asan)
|
||||
if self.asan:
|
||||
tdLog.info("execute script: %s" % self.stopDnodesSigintPath)
|
||||
os.system(self.stopDnodesSigintPath)
|
||||
tdLog.info("execute finished")
|
||||
return
|
||||
|
||||
def killProcesser(self, processerName):
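# Repeatedly terminate the named process until it no longer shows up in the
# process list: wmic on Windows, ps/grep + kill -TERM elsewhere; lookup
# failures simply end the loop.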
|
||||
if platform.system().lower() == 'windows':
|
||||
killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName)
|
||||
psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName))
|
||||
else:
|
||||
killCmd = (
|
||||
"ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
|
||||
% processerName
|
||||
)
|
||||
psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName)
|
||||
|
||||
processID = ""
|
||||
|
||||
try:
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
while processID:
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
try:
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
except Exception as err:
|
||||
processID = ""
|
||||
tdLog.debug(f'**** kill pid warn: {err}')
|
||||
except Exception as err:
|
||||
processID = ""
|
||||
tdLog.debug(f'**** find pid warn: {err}')
|
||||
|
||||
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes, asan:%d" % self.asan)
|
||||
if platform.system().lower() != 'windows':
|
||||
distro_id = distro.id()
|
||||
else:
|
||||
distro_id = "not alpine"
|
||||
if self.asan and distro_id != "alpine":
|
||||
tdLog.info("execute script: %s" % self.stopDnodesPath)
|
||||
os.system(self.stopDnodesPath)
|
||||
tdLog.info("execute finished")
|
||||
return
|
||||
|
||||
if (not self.dnodes[0].remoteIP == ""):
|
||||
self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
|
||||
return
|
||||
for i in range(len(self.dnodes)):
|
||||
self.dnodes[i].stop()
|
||||
|
||||
|
||||
if (distro_id == "alpine"):
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
while(processID):
|
||||
print(processID)
|
||||
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
elif platform.system().lower() == 'windows':
|
||||
self.killProcesser("taosd")
|
||||
self.killProcesser("tmq_sim")
|
||||
self.killProcesser("taosBenchmark")
|
||||
else:
|
||||
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
if processID:
|
||||
cmd = "sudo systemctl stop taosd"
|
||||
os.system(cmd)
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
while(processID):
|
||||
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
if self.killValgrind == 1:
|
||||
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
while(processID):
|
||||
if platform.system().lower() == 'windows':
|
||||
killCmd = "kill -TERM %s > nul 2>&1" % processID
|
||||
else:
|
||||
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8").strip()
|
||||
|
||||
# if os.system(cmd) != 0 :
|
||||
# tdLog.exit(cmd)
|
||||
|
||||
def getDnodesRootDir(self):
|
||||
dnodesRootDir = "%s/sim" % (self.path)
|
||||
return dnodesRootDir
|
||||
|
||||
def getSimCfgPath(self):
|
||||
return self.sim.getCfgDir()
|
||||
|
||||
def getSimLogPath(self):
|
||||
return self.sim.getLogDir()
|
||||
|
||||
def addSimExtraCfg(self, option, value):
|
||||
self.sim.addExtraCfg(option, value)
|
||||
|
||||
def getAsan(self):
|
||||
return self.asan
|
||||
|
||||
tdDnodes = TDDnodes()
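# Minimal usage sketch (illustrative only; assumes the frame test harness has
# already called tdDnodes.init(path) with a valid working directory):
#   tdDnodes.deploy(1)    # write cfg/log/data directories for dnode 1
#   tdDnodes.start(1)     # launch taosd and wait until it reports online
#   ...                   # run the test case
#   tdDnodes.stopAll()    # tear down every dnode that was started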
|
|
@ -0,0 +1,65 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
class GetTime:
|
||||
|
||||
def get_ms_timestamp(self,ts_str):
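# Convert a timestamp string to epoch milliseconds: ISO strings carrying a
# "+" UTC offset go through fromisoformat; otherwise
# "YYYY-MM-DD[ HH:MM:SS[.ffffff]]" is parsed in local time, with trailing
# nanosecond digits stripped first so strptime's %f can handle the fraction.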
|
||||
_ts_str = ts_str
|
||||
if "+" in _ts_str:
|
||||
timestamp = datetime.fromisoformat(_ts_str)
|
||||
return int((timestamp-datetime.fromtimestamp(0,timestamp.tzinfo)).total_seconds())*1000+int(timestamp.microsecond / 1000)
|
||||
if " " in ts_str:
|
||||
p = ts_str.split(" ")[1]
|
||||
if len(p) > 15 :
|
||||
_ts_str = ts_str[:-3]
|
||||
if ':' in _ts_str and '.' in _ts_str:
|
||||
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f")
|
||||
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
|
||||
elif ':' in _ts_str and '.' not in _ts_str:
|
||||
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S")
|
||||
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
|
||||
else:
|
||||
timestamp = datetime.strptime(_ts_str, "%Y-%m-%d")
|
||||
date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
|
||||
return date_time
|
||||
def get_us_timestamp(self,ts_str):
|
||||
_ts = self.get_ms_timestamp(ts_str) * 1000
|
||||
if " " in ts_str:
|
||||
p = ts_str.split(" ")[1]
|
||||
if len(p) > 12:
|
||||
us_ts = p[12:15]
|
||||
_ts += int(us_ts)
|
||||
return _ts
|
||||
def get_ns_timestamp(self,ts_str):
|
||||
_ts = self.get_us_timestamp(ts_str) *1000
|
||||
if " " in ts_str:
|
||||
p = ts_str.split(" ")[1]
|
||||
if len(p) > 15:
|
||||
us_ts = p[15:]
|
||||
_ts += int(us_ts)
|
||||
return _ts
|
||||
def time_transform(self,ts_str,precision):
|
||||
date_time = []
|
||||
if precision == 'ms':
|
||||
for i in ts_str:
|
||||
date_time.append(self.get_ms_timestamp(i))
|
||||
elif precision == 'us':
|
||||
for i in ts_str:
|
||||
date_time.append(self.get_us_timestamp(i))
|
||||
elif precision == 'ns':
|
||||
for i in ts_str:
|
||||
date_time.append(self.get_ns_timestamp(i))
|
||||
return date_time
|
|
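A minimal sketch of how a test might call the converter above; the timestamp strings are illustrative, and the returned epoch values depend on the local timezone because get_ms_timestamp uses time.mktime:

```python
# Hypothetical usage of GetTime: convert timestamp strings to epoch integers
# at millisecond, microsecond and nanosecond precision.
get_time = GetTime()
ts_list = ["2023-11-01 06:28:15.210", "2023-11-01 06:28:16"]

ms_values = get_time.time_transform(ts_list, 'ms')   # local-time epoch values in ms
us_values = get_time.time_transform(ts_list, 'us')   # ms values scaled by 1000, plus extra digits if present
ns_values = get_time.time_transform(ts_list, 'ns')   # us values scaled by 1000, plus extra digits if present
print(ms_values, us_values, ns_values)
```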
@ -0,0 +1,49 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import time
import datetime
from distutils.log import warn as printf


class TDLog:
    def __init__(self):
        self.path = ""

    def info(self, info):
        print("%s %s\n" % (datetime.datetime.now(), info))

    def sleep(self, sec):
        print("%s sleep %d seconds" % (datetime.datetime.now(), sec))
        time.sleep(sec)

    def debug(self, err):
        print("\033[1;36m%s %s\033[0m" % (datetime.datetime.now(), err))

    def success(self, info):
        printf("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))

    def notice(self, err):
        print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))

    def exit(self, err):
        print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
        sys.exit(1)

    def printNoPrefix(self, info):
        print("\033[1;36m%s\033[0m" % (info))


tdLog = TDLog()
@ -0,0 +1,83 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-


import os
from frame.log import *


class TDFindPath:
    """This class is for finding paths within TDengine.
    """
    def __init__(self):
        self.file = ""

    def init(self, file):
        """Remember the file to anchor the directory walk at.

        Args:
            file (str): the file location to start the search from. Generally pass __file__.
        """
        self.file = file

    def getTaosdemoPath(self):
        """Find the path of the directory containing taosdemo.

        Returns:
            str: the path to the directory containing taosdemo
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # initialized so the check below never hits an unbound name
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info(f"taosd found in {buildPath}")
        return buildPath + "/build/bin/"

    def getTDenginePath(self):
        """Find the root path of TDengine.

        Returns:
            str: the root path of TDengine
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        print(projPath)
        rootRealPath = ""  # initialized so the check below never hits an unbound name
        for root, dirs, files in os.walk(projPath):
            if ("sim" in dirs):
                print(root)
                rootRealPath = os.path.realpath(root)
        if (rootRealPath == ""):
            tdLog.exit("TDengine not found!")
        else:
            tdLog.info(f"TDengine found in {rootRealPath}")
            return rootRealPath


tdFindPath = TDFindPath()
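A hedged usage sketch for the path helper above, assuming the test file lives somewhere under the tests tree; the module name frame.findpath is an assumption, since the file name is not shown in this hunk:

```python
# Hypothetical usage of TDFindPath from inside a test script.
from frame.findpath import tdFindPath   # assumed import path for this file

tdFindPath.init(__file__)                # anchor the directory walk at the current test file
bin_dir = tdFindPath.getTaosdemoPath()   # ends with "/build/bin/"; exits the run if taosd is missing
root_dir = tdFindPath.getTDenginePath()  # real path of the work tree holding the sim/ directory
print(bin_dir, root_dir)
```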
@ -0,0 +1,655 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import inspect
|
||||
import traceback
|
||||
import psutil
|
||||
import shutil
|
||||
import pandas as pd
|
||||
from frame.log import *
|
||||
from frame.constant import *
|
||||
|
||||
# from datetime import timezone
|
||||
import time
|
||||
|
||||
def _parse_ns_timestamp(timestr):
|
||||
dt_obj = datetime.datetime.strptime(timestr[:len(timestr)-3], "%Y-%m-%d %H:%M:%S.%f")
|
||||
tz = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e9) + int(dt_obj.microsecond * 1000) + int(timestr[-3:])
|
||||
return tz
|
||||
|
||||
|
||||
def _parse_datetime(timestr):
|
||||
try:
|
||||
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
class TDSql:
|
||||
def __init__(self):
|
||||
self.queryRows = 0
|
||||
self.queryCols = 0
|
||||
self.affectedRows = 0
|
||||
|
||||
def init(self, cursor, log=False):
|
||||
self.cursor = cursor
|
||||
|
||||
if (log):
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
self.cursor.log(caller.filename + ".sql")
|
||||
|
||||
def close(self):
|
||||
self.cursor.close()
|
||||
|
||||
def prepare(self, dbname="db", drop=True, **kwargs):
|
||||
tdLog.info(f"prepare database:{dbname}")
|
||||
s = 'reset query cache'
|
||||
try:
|
||||
self.cursor.execute(s)
|
||||
except:
|
||||
tdLog.notice("'reset query cache' is not supported")
|
||||
if drop:
|
||||
s = f'drop database if exists {dbname}'
|
||||
self.cursor.execute(s)
|
||||
s = f'create database {dbname}'
|
||||
for k, v in kwargs.items():
|
||||
s += f" {k} {v}"
|
||||
if "duration" not in kwargs:
|
||||
s += " duration 300"
|
||||
self.cursor.execute(s)
|
||||
s = f'use {dbname}'
|
||||
self.cursor.execute(s)
|
||||
time.sleep(2)
|
||||
|
||||
def error(self, sql, expectedErrno = None, expectErrInfo = None):
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
expectErrNotOccured = True
|
||||
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
except BaseException as e:
|
||||
expectErrNotOccured = False
|
||||
self.errno = e.errno
|
||||
error_info = repr(e)
|
||||
self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'","")
|
||||
# self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
|
||||
if expectErrNotOccured:
|
||||
tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
|
||||
else:
|
||||
self.queryRows = 0
|
||||
self.queryCols = 0
|
||||
self.queryResult = None
|
||||
|
||||
if expectedErrno != None:
|
||||
if expectedErrno == self.errno:
|
||||
tdLog.info("sql:%s, expected errno %s occured" % (sql, expectedErrno))
|
||||
else:
|
||||
tdLog.exit("%s(%d) failed: sql:%s, errno %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno))
|
||||
else:
|
||||
tdLog.info("sql:%s, expect error occured" % (sql))
|
||||
|
||||
if expectErrInfo != None:
|
||||
if expectErrInfo == self.error_info or expectErrInfo in self.error_info:
|
||||
tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
|
||||
else:
|
||||
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
|
||||
else:
|
||||
tdLog.info("sql:%s, expect error occured" % (sql))
|
||||
|
||||
return self.error_info
|
||||
|
||||
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
|
||||
self.sql = sql
|
||||
i=1
|
||||
while i <= queryTimes:
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
|
||||
if count_expected_res is not None:
|
||||
counter = 0
|
||||
while count_expected_res != self.queryResult[0][0]:
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
if counter < queryTimes:
|
||||
counter += 0.5
|
||||
time.sleep(0.5)
|
||||
else:
|
||||
return False
|
||||
if row_tag:
|
||||
return self.queryResult
|
||||
return self.queryRows
|
||||
except Exception as e:
|
||||
tdLog.notice("Try to query again, query times: %d "%i)
|
||||
if i == queryTimes:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
i+=1
|
||||
time.sleep(1)
|
||||
pass
|
||||
|
||||
|
||||
def is_err_sql(self, sql):
|
||||
err_flag = True
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
except BaseException:
|
||||
err_flag = False
|
||||
|
||||
return False if err_flag else True
|
||||
|
||||
def getVariable(self, search_attr):
|
||||
'''
|
||||
get variable of search_attr access "show variables"
|
||||
'''
|
||||
try:
|
||||
sql = 'show variables'
|
||||
param_list = self.query(sql, row_tag=True)
|
||||
for param in param_list:
|
||||
if param[0] == search_attr:
|
||||
return param[1], param_list
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
|
||||
def getColNameList(self, sql, col_tag=None):
|
||||
self.sql = sql
|
||||
try:
|
||||
col_name_list = []
|
||||
col_type_list = []
|
||||
self.cursor.execute(sql)
|
||||
for query_col in self.cursor.description:
|
||||
col_name_list.append(query_col[0])
|
||||
col_type_list.append(query_col[1])
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
if col_tag:
|
||||
return col_name_list, col_type_list
|
||||
return col_name_list
|
||||
|
||||
def waitedQuery(self, sql, expectRows, timeout):
|
||||
tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
|
||||
self.sql = sql
|
||||
try:
|
||||
for i in range(timeout):
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
|
||||
if self.queryRows >= expectRows:
|
||||
return (self.queryRows, i)
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
return (self.queryRows, timeout)
|
||||
|
||||
def getRows(self):
|
||||
return self.queryRows
|
||||
|
||||
def checkRows(self, expectRows):
|
||||
if self.queryRows == expectRows:
|
||||
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
|
||||
return True
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
|
||||
|
||||
def checkRows_range(self, excepte_row_list):
|
||||
if self.queryRows in excepte_row_list:
|
||||
tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{excepte_row_list}")
|
||||
return True
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{self.sql}, queryRows:{self.queryRows} not in expect:{excepte_row_list}")
|
||||
|
||||
def checkCols(self, expectCols):
|
||||
if self.queryCols == expectCols:
|
||||
tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols))
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, self.queryCols, expectCols)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, queryCols:%d != expect:%d" % args)
|
||||
|
||||
def checkRowCol(self, row, col):
|
||||
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||
if row < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, row)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
|
||||
if col < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, col)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
|
||||
if row > self.queryRows:
|
||||
args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
|
||||
if col > self.queryCols:
|
||||
args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
|
||||
|
||||
def checkDataType(self, row, col, dataType):
|
||||
self.checkRowCol(row, col)
|
||||
return self.cursor.istype(col, dataType)
|
||||
|
||||
|
||||
def checkData(self, row, col, data, show = False):
|
||||
if row >= self.queryRows:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row+1, self.queryRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
|
||||
if col >= self.queryCols:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, col+1, self.queryCols)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
|
||||
|
||||
self.checkRowCol(row, col)
|
||||
|
||||
if self.queryResult[row][col] != data:
|
||||
if self.cursor.istype(col, "TIMESTAMP"):
|
||||
# assume the user wants to check a nanosecond timestamp when a longer string is passed
|
||||
if isinstance(data,str) :
|
||||
if (len(data) >= 28):
|
||||
if self.queryResult[row][col] == _parse_ns_timestamp(data):
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
else:
|
||||
if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
|
||||
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
return
|
||||
elif isinstance(data,int):
|
||||
if len(str(data)) == 16:
|
||||
precision = 'us'
|
||||
elif len(str(data)) == 13:
|
||||
precision = 'ms'
|
||||
elif len(str(data)) == 19:
|
||||
precision = 'ns'
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
return
|
||||
success = False
|
||||
if precision == 'ms':
|
||||
dt_obj = self.queryResult[row][col]
|
||||
ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000)
|
||||
if ts == data:
|
||||
success = True
|
||||
elif precision == 'us':
|
||||
dt_obj = self.queryResult[row][col]
|
||||
ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond)
|
||||
if ts == data:
|
||||
success = True
|
||||
elif precision == 'ns':
|
||||
if data == self.queryResult[row][col]:
|
||||
success = True
|
||||
if success:
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
return
|
||||
elif isinstance(data,datetime.datetime):
|
||||
dt_obj = self.queryResult[row][col]
|
||||
delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo)
|
||||
delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo)
|
||||
if delt_data == delt_result:
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
return
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
|
||||
if str(self.queryResult[row][col]) == str(data):
|
||||
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
return
|
||||
|
||||
elif isinstance(data, float):
|
||||
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
|
||||
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
|
||||
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
return
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
|
||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||
if(show):
|
||||
tdLog.info("check successfully")
|
||||
|
||||
# return true or false replace exit, no print out
|
||||
def checkRowColNoExit(self, row, col):
|
||||
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||
if row < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, row)
|
||||
return False
|
||||
if col < 0:
|
||||
args = (caller.filename, caller.lineno, self.sql, row)
|
||||
return False
|
||||
if row > self.queryRows:
|
||||
args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
|
||||
return False
|
||||
if col > self.queryCols:
|
||||
args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# return true or false replace exit, no print out
|
||||
def checkDataNoExit(self, row, col, data):
|
||||
if self.checkRowColNoExit(row, col) == False:
|
||||
return False
|
||||
if self.queryResult[row][col] != data:
|
||||
if self.cursor.istype(col, "TIMESTAMP"):
|
||||
# assume the user wants to check a nanosecond timestamp when a longer string is passed
|
||||
if (len(data) >= 28):
|
||||
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
|
||||
return True
|
||||
else:
|
||||
if self.queryResult[row][col] == _parse_datetime(data):
|
||||
return True
|
||||
return False
|
||||
|
||||
if str(self.queryResult[row][col]) == str(data):
|
||||
return True
|
||||
elif isinstance(data, float):
|
||||
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
|
||||
return True
|
||||
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
else:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# loop execute sql then sleep(waitTime) , if checkData ok break loop
|
||||
def checkDataLoop(self, row, col, data, sql, loopCount, waitTime):
|
||||
# loop check util checkData return true
|
||||
for i in range(loopCount):
|
||||
self.query(sql)
|
||||
if self.checkDataNoExit(row, col, data) :
|
||||
self.checkData(row, col, data)
|
||||
return
|
||||
time.sleep(waitTime)
|
||||
|
||||
# last check
|
||||
self.query(sql)
|
||||
self.checkData(row, col, data)
|
||||
|
||||
|
||||
def getData(self, row, col):
|
||||
self.checkRowCol(row, col)
|
||||
return self.queryResult[row][col]
|
||||
|
||||
def getResult(self, sql):
|
||||
self.sql = sql
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
except Exception as e:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
return self.queryResult
|
||||
|
||||
def executeTimes(self, sql, times):
|
||||
for i in range(times):
|
||||
try:
|
||||
return self.cursor.execute(sql)
|
||||
except BaseException:
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
def execute(self, sql, queryTimes=30, show=False):
|
||||
self.sql = sql
|
||||
if show:
|
||||
tdLog.info(sql)
|
||||
i=1
|
||||
while i <= queryTimes:
|
||||
try:
|
||||
self.affectedRows = self.cursor.execute(sql)
|
||||
return self.affectedRows
|
||||
except Exception as e:
|
||||
tdLog.notice("Try to execute sql again, query times: %d "%i)
|
||||
if i == queryTimes:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||
raise Exception(repr(e))
|
||||
i+=1
|
||||
time.sleep(1)
|
||||
pass
|
||||
|
||||
def checkAffectedRows(self, expectAffectedRows):
|
||||
if self.affectedRows != expectAffectedRows:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args)
|
||||
|
||||
tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))
|
||||
|
||||
def checkColNameList(self, col_name_list, expect_col_name_list):
|
||||
if col_name_list == expect_col_name_list:
|
||||
tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list))
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
|
||||
|
||||
def __check_equal(self, elm, expect_elm):
|
||||
if elm == expect_elm:
|
||||
return True
|
||||
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
|
||||
if len(elm) != len(expect_elm):
|
||||
return False
|
||||
if len(elm) == 0:
|
||||
return True
|
||||
for i in range(len(elm)):
|
||||
flag = self.__check_equal(elm[i], expect_elm[i])
|
||||
if not flag:
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
def checkEqual(self, elm, expect_elm):
|
||||
if elm == expect_elm:
|
||||
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||
return
|
||||
if self.__check_equal(elm, expect_elm):
|
||||
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||
return
|
||||
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||
# tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||
raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||
|
||||
def checkNotEqual(self, elm, expect_elm):
|
||||
if elm != expect_elm:
|
||||
tdLog.info("sql:%s, elm:%s != expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||
tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||
raise Exception
|
||||
|
||||
def get_times(self, time_str, precision="ms"):
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
if time_str[-1] not in TAOS_TIME_INIT:
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {time_str} not a standard taos time init")
|
||||
if precision not in TAOS_PRECISION:
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {precision} not a standard taos time precision")
|
||||
|
||||
if time_str[-1] == TAOS_TIME_INIT[0]:
|
||||
times = int(time_str[:-1]) * TIME_NS
|
||||
if time_str[-1] == TAOS_TIME_INIT[1]:
|
||||
times = int(time_str[:-1]) * TIME_US
|
||||
if time_str[-1] == TAOS_TIME_INIT[2]:
|
||||
times = int(time_str[:-1]) * TIME_MS
|
||||
if time_str[-1] == TAOS_TIME_INIT[3]:
|
||||
times = int(time_str[:-1]) * TIME_S
|
||||
if time_str[-1] == TAOS_TIME_INIT[4]:
|
||||
times = int(time_str[:-1]) * TIME_M
|
||||
if time_str[-1] == TAOS_TIME_INIT[5]:
|
||||
times = int(time_str[:-1]) * TIME_H
|
||||
if time_str[-1] == TAOS_TIME_INIT[6]:
|
||||
times = int(time_str[:-1]) * TIME_D
|
||||
if time_str[-1] == TAOS_TIME_INIT[7]:
|
||||
times = int(time_str[:-1]) * TIME_W
|
||||
if time_str[-1] == TAOS_TIME_INIT[8]:
|
||||
times = int(time_str[:-1]) * TIME_N
|
||||
if time_str[-1] == TAOS_TIME_INIT[9]:
|
||||
times = int(time_str[:-1]) * TIME_Y
|
||||
|
||||
if precision == "ms":
|
||||
return int(times)
|
||||
elif precision == "us":
|
||||
return int(times*1000)
|
||||
elif precision == "ns":
|
||||
return int(times*1000*1000)
|
||||
|
||||
def get_type(self, col):
|
||||
if self.cursor.istype(col, "BOOL"):
|
||||
return "BOOL"
|
||||
if self.cursor.istype(col, "INT"):
|
||||
return "INT"
|
||||
if self.cursor.istype(col, "BIGINT"):
|
||||
return "BIGINT"
|
||||
if self.cursor.istype(col, "TINYINT"):
|
||||
return "TINYINT"
|
||||
if self.cursor.istype(col, "SMALLINT"):
|
||||
return "SMALLINT"
|
||||
if self.cursor.istype(col, "FLOAT"):
|
||||
return "FLOAT"
|
||||
if self.cursor.istype(col, "DOUBLE"):
|
||||
return "DOUBLE"
|
||||
if self.cursor.istype(col, "BINARY"):
|
||||
return "BINARY"
|
||||
if self.cursor.istype(col, "NCHAR"):
|
||||
return "NCHAR"
|
||||
if self.cursor.istype(col, "TIMESTAMP"):
|
||||
return "TIMESTAMP"
|
||||
if self.cursor.istype(col, "JSON"):
|
||||
return "JSON"
|
||||
if self.cursor.istype(col, "TINYINT UNSIGNED"):
|
||||
return "TINYINT UNSIGNED"
|
||||
if self.cursor.istype(col, "SMALLINT UNSIGNED"):
|
||||
return "SMALLINT UNSIGNED"
|
||||
if self.cursor.istype(col, "INT UNSIGNED"):
|
||||
return "INT UNSIGNED"
|
||||
if self.cursor.istype(col, "BIGINT UNSIGNED"):
|
||||
return "BIGINT UNSIGNED"
|
||||
|
||||
def taosdStatus(self, state):
|
||||
tdLog.sleep(5)
|
||||
pstate = 0
|
||||
for i in range(30):
|
||||
pstate = 0
|
||||
pl = psutil.pids()
|
||||
for pid in pl:
|
||||
try:
|
||||
if psutil.Process(pid).name() == 'taosd':
|
||||
print('have already started')
|
||||
pstate = 1
|
||||
break
|
||||
except psutil.NoSuchProcess:
|
||||
pass
|
||||
if pstate == state :break
|
||||
if state or pstate:
|
||||
tdLog.sleep(1)
|
||||
continue
|
||||
pstate = 0
|
||||
break
|
||||
|
||||
args=(pstate,state)
|
||||
if pstate == state:
|
||||
tdLog.info("taosd state is %d == expect:%d" %args)
|
||||
else:
|
||||
tdLog.exit("taosd state is %d != expect:%d" %args)
|
||||
pass
|
||||
|
||||
def haveFile(self, dir, state):
|
||||
if os.path.exists(dir) and os.path.isdir(dir):
|
||||
if not os.listdir(dir):
|
||||
if state :
|
||||
tdLog.exit("dir: %s is empty, expect: not empty" %dir)
|
||||
else:
|
||||
tdLog.info("dir: %s is empty, expect: empty" %dir)
|
||||
else:
|
||||
if state :
|
||||
tdLog.info("dir: %s is not empty, expect: not empty" %dir)
|
||||
else:
|
||||
tdLog.exit("dir: %s is not empty, expect: empty" %dir)
|
||||
else:
|
||||
tdLog.exit("dir: %s doesn't exist" %dir)
|
||||
def createDir(self, dir):
|
||||
if os.path.exists(dir):
|
||||
shutil.rmtree(dir)
|
||||
tdLog.info("dir: %s is removed" %dir)
|
||||
os.makedirs(dir, 0o755)
|
||||
tdLog.info("dir: %s is created" %dir)
|
||||
pass
|
||||
|
||||
tdSql = TDSql()
|
|
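The tdSql singleton above is what individual test cases drive; a minimal, hypothetical test body using only helpers defined in this class (database name, table and values are illustrative):

```python
# Hypothetical test sketch built on the TDSql helpers above.
tdSql.prepare(dbname="demo")                    # reset query cache, recreate database "demo", use it
tdSql.execute("create table t (ts timestamp, v int)")
tdSql.execute("insert into t values (now, 1) (now+1s, 2)", show=True)

tdSql.query("select * from t")                  # retried internally on transient failures
tdSql.checkRows(2)                              # tdLog.exit() aborts the case on mismatch
tdSql.checkData(1, 1, 2)                        # row 1, column 1 should equal 2

tdSql.error("select * from not_exist_table")    # negative case: the statement must fail
```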
@ -0,0 +1,70 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from sql import tdSql
|
||||
|
||||
class TDSetSql:
|
||||
def init(self, conn, logSql):
|
||||
|
||||
self.stbname = 'stb'
|
||||
|
||||
def set_create_normaltable_sql(self, ntbname='ntb',
|
||||
column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'unsigned int','col6': 'unsigned tinyint','col7': 'unsigned smallint',
|
||||
'col8': 'unsigned int','col9': 'unsigned bigint','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'}):
|
||||
column_sql = ''
|
||||
for k, v in column_dict.items():
|
||||
column_sql += f"{k} {v},"
|
||||
create_ntb_sql = f'create table {ntbname} ({column_sql[:-1]})'
|
||||
return create_ntb_sql
|
||||
|
||||
def set_create_stable_sql(self,stbname='stb',
|
||||
column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'unsigned int','col6': 'unsigned tinyint','col7': 'unsigned smallint',
|
||||
'col8': 'unsigned int','col9': 'unsigned bigint','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'},
|
||||
tag_dict={'ts_tag':'timestamp','t1':'tinyint','t2':'smallint','t3':'int','t4':'bigint','t5': 'unsigned int','t6': 'unsigned tinyint','t7': 'unsigned smallint',
|
||||
't8': 'unsigned int','t9': 'unsigned bigint','t10': 'float','t11': 'double','t12': 'bool','t13': 'binary(20)','t14': 'nchar(20)'}):
|
||||
column_sql = ''
|
||||
tag_sql = ''
|
||||
for k,v in column_dict.items():
|
||||
column_sql += f"{k} {v},"
|
||||
for k,v in tag_dict.items():
|
||||
tag_sql += f"{k} {v},"
|
||||
create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
|
||||
return create_stb_sql
|
||||
|
||||
def set_insertsql(self,column_dict,tbname,binary_str=None,nchar_str=None):
|
||||
sql = ''
|
||||
for k, v in column_dict.items():
|
||||
if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
|
||||
v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
|
||||
sql += '%d,'
|
||||
elif v.lower() == 'float' or v.lower() == 'double':
|
||||
sql += '%f,'
|
||||
elif 'binary' in v.lower():
|
||||
sql += f'"{binary_str}%d",'
|
||||
elif 'nchar' in v.lower():
|
||||
sql += f'"{nchar_str}%d",'
|
||||
return (f'insert into {tbname} values({sql[:-1]})')
|
||||
|
||||
def insert_values(self,column_dict,i,insert_sql,insert_list,ts):
|
||||
for k, v in column_dict.items():
|
||||
if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\
|
||||
'binary' in v.lower() or 'nchar' in v.lower():
|
||||
insert_list.append(0 + i)
|
||||
elif v.lower() == 'float' or v.lower() == 'double':
|
||||
insert_list.append(0.1 + i)
|
||||
elif v.lower() == 'bool':
|
||||
insert_list.append(i % 2)
|
||||
elif v.lower() == 'timestamp':
|
||||
insert_list.append(ts + i)
|
||||
tdSql.execute(insert_sql%(tuple(insert_list)))
|
||||
|
|
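The helpers above are designed to compose: build a CREATE TABLE statement, build a matching %-style insert template, then let insert_values fill and execute it row by row. A hedged sketch of that flow (column layout, row count and start timestamp are illustrative):

```python
# Hypothetical composition of the TDSetSql helpers above.
setsql = TDSetSql()
column_dict = {'ts': 'timestamp', 'col1': 'int', 'col2': 'float',
               'col3': 'binary(20)', 'col4': 'nchar(20)'}

tdSql.execute(setsql.set_create_normaltable_sql('ntb', column_dict))

# one '%d'/'%f' placeholder per numeric column, '"bin%d"'/'"nchar%d"' for string columns
insert_sql = setsql.set_insertsql(column_dict, 'ntb', binary_str='bin', nchar_str='nchar')

start_ts = 1698820095000  # illustrative epoch-ms base timestamp
for i in range(10):
    # appends one value per column in dict order, then executes the filled template
    setsql.insert_values(column_dict, i, insert_sql, [], start_ts)
```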
@ -0,0 +1,44 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import time
import datetime
from frame.log import *


class TDSub:
    def __init__(self):
        self.consumedRows = 0
        self.consumedCols = 0

    def init(self, sub):
        self.sub = sub

    def close(self, keepProgress):
        self.sub.close(keepProgress)

    def consume(self):
        self.result = self.sub.consume()
        self.result.fetch_all()
        self.consumedRows = self.result.row_count
        self.consumedCols = self.result.field_count
        return self.consumedRows

    def checkRows(self, expectRows):
        if self.consumedRows != expectRows:
            tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
        tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))


tdSub = TDSub()
@ -0,0 +1,261 @@
|
|||
import requests
|
||||
from fabric2 import Connection
|
||||
from frame.log import *
|
||||
from frame.common import *
|
||||
|
||||
|
||||
class TAdapter:
|
||||
def __init__(self):
|
||||
self.running = 0
|
||||
self.deployed = 0
|
||||
self.remoteIP = ""
|
||||
self.taosadapter_cfg_dict = {
|
||||
"debug" : True,
|
||||
"taosConfigDir" : "",
|
||||
"port" : 6041,
|
||||
"logLevel" : "error",
|
||||
"cors" : {
|
||||
"allowAllOrigins" : True,
|
||||
},
|
||||
"pool" : {
|
||||
"maxConnect" : 4000,
|
||||
"maxIdle" : 4000,
|
||||
"idleTimeout" : "1h"
|
||||
},
|
||||
"ssl" : {
|
||||
"enable" : False,
|
||||
"certFile" : "",
|
||||
"keyFile" : "",
|
||||
},
|
||||
"log" : {
|
||||
"path" : "",
|
||||
"rotationCount" : 30,
|
||||
"rotationTime" : "24h",
|
||||
"rotationSize" : "1GB",
|
||||
"enableRecordHttpSql" : True,
|
||||
"sqlRotationCount" : 2,
|
||||
"sqlRotationTime" : "24h",
|
||||
"sqlRotationSize" : "1GB",
|
||||
},
|
||||
"monitor" : {
|
||||
"collectDuration" : "3s",
|
||||
"incgroup" : False,
|
||||
"pauseQueryMemoryThreshold" : 70,
|
||||
"pauseAllMemoryThreshold" : 80,
|
||||
"identity" : "",
|
||||
"writeToTD" : True,
|
||||
"user" : "root",
|
||||
"password" : "taosdata",
|
||||
"writeInterval" : "30s"
|
||||
},
|
||||
"opentsdb" : {
|
||||
"enable" : True
|
||||
},
|
||||
"influxdb" : {
|
||||
"enable" : True
|
||||
},
|
||||
"statsd" : {
|
||||
"enable" : True
|
||||
},
|
||||
"collectd" : {
|
||||
"enable" : True
|
||||
},
|
||||
"opentsdb_telnet" : {
|
||||
"enable" : True
|
||||
},
|
||||
"node_exporter" : {
|
||||
"enable" : True
|
||||
},
|
||||
"prometheus" : {
|
||||
"enable" : True
|
||||
},
|
||||
}
|
||||
# TODO: add taosadapter env:
|
||||
# 1. init cfg.toml.dict :OK
|
||||
# 2. dump dict to toml : OK
|
||||
# 3. update cfg.toml.dict :OK
|
||||
# 4. check adapter exists : OK
|
||||
# 5. deploy adapter cfg : OK
|
||||
# 6. adapter start : OK
|
||||
# 7. adapter stop
|
||||
|
||||
def init(self, path, remoteIP=""):
|
||||
self.path = path
|
||||
self.remoteIP = remoteIP
|
||||
binPath = get_path() + "/../../../"
|
||||
binPath = os.path.realpath(binPath)
|
||||
|
||||
if path == "":
|
||||
self.path = os.path.abspath(binPath + "../../")
|
||||
else:
|
||||
self.path = os.path.realpath(path)
|
||||
|
||||
if self.remoteIP:
|
||||
try:
|
||||
self.config = eval(remoteIP)
|
||||
self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
|
||||
except Exception as e:
|
||||
tdLog.notice(e)
|
||||
|
||||
def update_cfg(self, update_dict :dict):
|
||||
if not isinstance(update_dict, dict):
|
||||
return
|
||||
if "log" in update_dict and "path" in update_dict["log"]:
|
||||
del update_dict["log"]["path"]
|
||||
for key, value in update_dict.items():
|
||||
if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]:
|
||||
if isinstance(value, dict):
|
||||
for k, v in value.items():
|
||||
self.taosadapter_cfg_dict[key][k] = v
|
||||
else:
|
||||
self.taosadapter_cfg_dict[key] = value
|
||||
|
||||
def check_adapter(self):
|
||||
if get_path(tool="taosadapter"):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def remote_exec(self, updateCfgDict, execCmd):
|
||||
remoteCfgDict = copy.deepcopy(updateCfgDict)
|
||||
if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]:
|
||||
del remoteCfgDict["log"]["path"]
|
||||
|
||||
remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode()
|
||||
execCmdStr = base64.b64encode(execCmd.encode()).decode()
|
||||
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
|
||||
self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" )
|
||||
|
||||
def cfg(self, option, value):
|
||||
cmd = f"echo {option} = {value} >> {self.cfg_path}"
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
def deploy(self, *update_cfg_dict):
|
||||
self.log_dir = os.path.join(self.path,"sim","dnode1","log")
|
||||
self.cfg_dir = os.path.join(self.path,"sim","dnode1","cfg")
|
||||
self.cfg_path = os.path.join(self.cfg_dir,"taosadapter.toml")
|
||||
|
||||
cmd = f"touch {self.cfg_path}"
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
|
||||
self.taosadapter_cfg_dict["log"]["path"] = self.log_dir
|
||||
if bool(update_cfg_dict):
|
||||
self.update_cfg(update_dict=update_cfg_dict)
|
||||
|
||||
if (self.remoteIP == ""):
|
||||
dict2toml(self.taosadapter_cfg_dict, self.cfg_path)
|
||||
else:
|
||||
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)")
|
||||
|
||||
self.deployed = 1
|
||||
|
||||
tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}")
|
||||
|
||||
def start(self):
|
||||
bin_path = get_path(tool="taosadapter")
|
||||
|
||||
if (bin_path == ""):
|
||||
tdLog.exit("taosadapter not found!")
|
||||
else:
|
||||
tdLog.info(f"taosadapter found: {bin_path}")
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = f"mintty -h never {bin_path} -c {self.cfg_path}"
|
||||
else:
|
||||
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & "
|
||||
|
||||
if self.remoteIP:
|
||||
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
|
||||
self.running = 1
|
||||
else:
|
||||
os.system(f"rm -rf {self.log_dir}{os.sep}taosadapter*")
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug(f"taosadapter is running with {cmd} " )
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
taosadapter_port = self.taosadapter_cfg_dict["port"]
|
||||
for i in range(5):
|
||||
ip = 'localhost'
|
||||
if self.remoteIP != "":
|
||||
ip = self.remoteIP
|
||||
url = f'http://{ip}:{taosadapter_port}/-/ping'
|
||||
try:
|
||||
r = requests.get(url)
|
||||
if r.status_code == 200:
|
||||
tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}")
|
||||
break
|
||||
except Exception:
|
||||
tdLog.info(f"the taosadapter do not started!!!")
|
||||
time.sleep(1)
|
||||
|
||||
def start_taosadapter(self):
|
||||
"""
|
||||
use this method, must deploy taosadapter
|
||||
"""
|
||||
bin_path = get_path(tool="taosadapter")
|
||||
|
||||
if (bin_path == ""):
|
||||
tdLog.exit("taosadapter not found!")
|
||||
else:
|
||||
tdLog.info(f"taosadapter found: {bin_path}")
|
||||
|
||||
if self.deployed == 0:
|
||||
tdLog.exit("taosadapter is not deployed")
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
|
||||
else:
|
||||
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & "
|
||||
|
||||
if self.remoteIP:
|
||||
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
|
||||
self.running = 1
|
||||
else:
|
||||
if os.system(cmd) != 0:
|
||||
tdLog.exit(cmd)
|
||||
self.running = 1
|
||||
tdLog.debug(f"taosadapter is running with {cmd} " )
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
def stop(self, force_kill=False):
|
||||
signal = "-9" if force_kill else "-15"
|
||||
if self.remoteIP:
|
||||
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")
|
||||
tdLog.info("stop taosadapter")
|
||||
return
|
||||
toBeKilled = "taosadapter"
|
||||
if platform.system().lower() == 'windows':
|
||||
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
while(processID):
|
||||
killCmd = "kill %s %s > nul 2>&1" % (signal, processID)
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
self.running = 0
|
||||
tdLog.debug(f"taosadapter is stopped by kill {signal}")
|
||||
|
||||
else:
|
||||
if self.running != 0:
|
||||
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
while(processID):
|
||||
killCmd = "kill %s %s > /dev/null 2>&1" % (signal, processID)
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||
port = 6041
|
||||
fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
|
||||
os.system(fuserCmd)
|
||||
self.running = 0
|
||||
tdLog.debug(f"taosadapter is stopped by kill {signal}")
|
||||
|
||||
|
||||
|
||||
tAdapter = TAdapter()
|
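A hedged sketch of driving the TAdapter helper above in a local (non-remote) run; the overridden settings are illustrative, and nested sections such as "log" are merged key by key by update_cfg:

```python
# Hypothetical local workflow for the TAdapter helper above.
tAdapter.init(path="", remoteIP="")      # resolve the project path next to the built binaries

tAdapter.update_cfg({
    "logLevel": "debug",                 # top-level key: replaced directly
    "log": {"rotationCount": 7},         # nested section: merged into the defaults
})

tAdapter.deploy()                        # writes sim/dnode1/cfg/taosadapter.toml
tAdapter.start()                         # launches taosadapter and polls /-/ping on port 6041
# ... run REST / schemaless cases against the adapter here ...
tAdapter.stop(force_kill=False)          # SIGTERM by default; force_kill=True sends SIGKILL
```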