other: merge 3.0

Haojun Liao 2023-07-25 18:17:11 +08:00
commit 1c1bf63e1f
34 changed files with 242 additions and 175 deletions

View File

@ -214,19 +214,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs table
`logs` table contains login information records.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_summary table
`log_summary` table contains log summary information records.

View File

@ -1007,13 +1007,12 @@ consumer.close()
### Other sample programs
| Example program links | Example program content |
| ------------------------------------------------------------------------------------------------------------- | ------------------- ---- |
|-----------------------|-------------------------|
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding,
bind multiple rows at once |
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at once |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | TMQ subscription |
| [tmq_consumer.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq_consumer.py) | TMQ subscription |
## Other notes

View File

@ -210,19 +210,6 @@ The TDinsight dashboard data comes from the log database (the default db for storing monitoring data
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### logs table
The `logs` table records login information.
|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content, no longer than 1024 bytes|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|
### log\_summary table
The `log_summary` table records log summary statistics.

View File

@ -85,8 +85,14 @@ extern int64_t tsVndCommitMaxIntervalMs;
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
extern int8_t tsGrant;
extern int32_t tsMndGrantMode;
extern bool tsMndSkipGrant;
// dnode
extern int64_t tsDndStart;
extern int64_t tsDndStartOsUptime;
extern int64_t tsDndUpTime;
// monitor
extern bool tsEnableMonitor;
extern int32_t tsMonitorInterval;

View File

@ -589,8 +589,9 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated);
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask);
//void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask);
int32_t streamTaskGetInputQItems(const SStreamTask* pTask);
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask);
bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask);

View File

@ -53,6 +53,7 @@ extern "C" {
#else
#include <argp.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#if defined(_TD_X86_)
#include <cpuid.h>
#endif

View File

@ -35,6 +35,7 @@ typedef struct {
bool taosCheckSystemIsLittleEnd();
void taosGetSystemInfo();
int64_t taosGetOsUptime();
int32_t taosGetEmail(char *email, int32_t maxLen);
int32_t taosGetOsReleaseName(char *releaseName, char* sName, char* ver, int32_t maxLen);
int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores);

View File

@ -77,8 +77,14 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000;
int8_t tsGrant = 1;
int32_t tsMndGrantMode = 0;
bool tsMndSkipGrant = false;
// dnode
int64_t tsDndStart = 0;
int64_t tsDndStartOsUptime = 0;
int64_t tsDndUpTime = 0;
// monitor
bool tsEnableMonitor = true;
int32_t tsMonitorInterval = 30;
@ -506,6 +512,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "grantMode", tsMndGrantMode, 0, 10000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddBool(pCfg, "skipGrant", tsMndSkipGrant, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, CFG_SCOPE_SERVER) != 0) return -1;
@ -915,6 +922,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64;
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
tsMndGrantMode = cfgGetItem(pCfg, "grantMode")->i32;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));

View File

@ -373,6 +373,8 @@ int mainWindows(int argc, char **argv) {
dInfo("start to init service"); dInfo("start to init service");
dmSetSignalHandle(); dmSetSignalHandle();
tsDndStart = taosGetTimestampMs();
tsDndStartOsUptime = taosGetOsUptime();
int32_t code = dmRun();
dInfo("shutting down the service");

View File

@ -24,11 +24,15 @@ static void *dmStatusThreadFp(void *param) {
const static int16_t TRIM_FREQ = 30;
int32_t trimCount = 0;
int32_t upTimeCount = 0;
int64_t upTime = 0;
while (1) {
taosMsleep(200);
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
int64_t curTime = taosGetTimestampMs();
if (curTime < lastTime) lastTime = curTime;
float interval = (curTime - lastTime) / 1000.0f;
if (interval >= tsStatusInterval) {
dmSendStatusReq(pMgmt);
@ -38,6 +42,11 @@ static void *dmStatusThreadFp(void *param) {
if (trimCount == 0) {
taosMemoryTrim(0);
}
if ((upTimeCount = ((upTimeCount + 1) & 63)) == 0) {
upTime = taosGetOsUptime() - tsDndStartOsUptime;
tsDndUpTime = TMAX(tsDndUpTime, upTime);
}
}
}
@ -54,6 +63,7 @@ static void *dmMonitorThreadFp(void *param) {
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
int64_t curTime = taosGetTimestampMs();
if (curTime < lastTime) lastTime = curTime;
float interval = (curTime - lastTime) / 1000.0f;
if (interval >= tsMonitorInterval) {
(*pMgmt->sendMonitorReportFp)();

View File

@ -290,6 +290,7 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = TSDB_DEFAULT_USER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.parent = pDnode;
rpcInit.rfp = rpcRfp;

View File

@ -27,7 +27,7 @@ void mndCleanupCluster(SMnode *pMnode);
int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len);
int64_t mndGetClusterId(SMnode *pMnode);
int64_t mndGetClusterCreateTime(SMnode *pMnode);
float mndGetClusterUpTime(SMnode *pMnode);
int64_t mndGetClusterUpTime(SMnode *pMnode);
#ifdef __cplusplus
}

View File

@ -123,7 +123,7 @@ static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
#endif
}
float mndGetClusterUpTime(SMnode *pMnode) {
int64_t mndGetClusterUpTime(SMnode *pMnode) {
int64_t upTime = 0;
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
@ -132,7 +132,7 @@ float mndGetClusterUpTime(SMnode *pMnode) {
mndReleaseCluster(pMnode, pCluster, pIter);
}
return upTime / 86400.0f;
return upTime;
}
static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {

View File

@ -655,6 +655,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
STrans *pTrans = NULL;
SDnodeObj *pDnode = NULL;
bool cfgAll = pCfgReq->dnodeId == -1;
int32_t iter = 0;
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
@ -662,6 +663,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
if (cfgAll) {
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
if (pIter == NULL) break;
++iter;
} else if (!(pDnode = mndAcquireDnode(pMnode, pCfgReq->dnodeId))) {
goto _OVER;
}
@ -699,7 +701,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
}
if (pTrans && mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
tsGrantHBInterval = TMIN(TMAX(5, iter / 2), 30);
terrno = 0;
_OVER:
@ -863,7 +865,7 @@ static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) {
code = mndCreateDnode(pMnode, pReq, &createReq);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
tsGrantHBInterval = 5;
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("dnode:%s:%d, failed to create since %s", createReq.fqdn, createReq.port, terrstr());

View File

@ -800,7 +800,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
if (pObj->id == pMnode->selfDnodeId) {
pClusterInfo->first_ep_dnode_id = pObj->id;
tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode);
pClusterInfo->master_uptime = (float)mndGetClusterUpTime(pMnode) / 86400.0f;
// pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
} else {

View File

@ -877,6 +877,7 @@ typedef struct SCacheRowsReader {
SSHashObj *pTableMap;
SArray *pLDataIterArray;
struct SDataFileReader *pFileReader;
STFileSet *pCurFileSet;
STsdbReadSnap *pReadSnap;
char *idstr;
int64_t lastTs;

View File

@ -1107,16 +1107,14 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
int64_t st = taosGetTimestampMs();
// we have to continue retrying to successfully execute the scan history task.
while (1) {
int8_t schedStatus = atomic_val_compare_exchange_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE,
TASK_SCHED_STATUS__WAITING);
if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
if (schedStatus != TASK_SCHED_STATUS__INACTIVE) {
break;
tqError(
}
"s-task:%s failed to start scan-history in first stream time window since already started, unexpected "
"sched-status:%d",
tqError("s-task:%s failed to start scan history in current time window, unexpected sched-status:%d, retry in 100ms",
id, schedStatus);
taosMsleep(100);
return 0;
}
ASSERT(pTask->status.pauseAllowed == false);
@ -1176,16 +1174,15 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
taosMsleep(100);
}
streamTaskHalt(pTask);
// now we can stop the stream task execution
// todo upgrade the statu to be HALT from PAUSE or NORMAL
streamTaskHalt(pStreamTask);
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
tqDebug("s-task:%s level:%d is halt by fill-history task:%s", pStreamTask->id.idStr, pStreamTask->info.taskLevel,
tqDebug("s-task:%s level:%d status is set to halt by fill-history task:%s", pStreamTask->id.idStr,
id);
pStreamTask->info.taskLevel, id);
// if it's an source task, extract the last version in wal.
streamHistoryTaskSetVerRangeStep2(pTask);
pRange = &pTask->dataRange.range;
int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
streamHistoryTaskSetVerRangeStep2(pTask, latestVer);
}
if (!streamTaskRecoverScanStep1Finished(pTask)) {

View File

@ -1949,7 +1949,8 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
STFileObj **pFileObj = state->pFileSet->farr;
if (pFileObj[0] != NULL || pFileObj[3] != NULL) {
SDataFileReaderConfig conf = {.tsdb = state->pTsdb, .szPage = state->pTsdb->pVnode->config.szPage};
if (state->pFileSet != state->pr->pCurFileSet) {
SDataFileReaderConfig conf = {.tsdb = state->pTsdb, .szPage = state->pTsdb->pVnode->config.tsdbPageSize};
const char *filesName[4] = {0};
if (pFileObj[0] != NULL) {
conf.files[0].file = *pFileObj[0]->f;
@ -1978,6 +1979,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
loadDataTomb(state->pr, state->pr->pFileReader);
state->pr->pCurFileSet = state->pFileSet;
}
if (!state->pIndexList) {
state->pIndexList = taosArrayInit(1, sizeof(SBrinBlk));
} else {
@ -2053,7 +2057,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
}
if (!state->pLastRow) {
if (state->pLastIter) {
lastIterClose(&state->pLastIter);
}
clearLastFileSet(state);
state->state = SFSNEXTROW_FILESET;
@ -2154,7 +2160,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
}
if (!state->pLastRow) {
if (state->pLastIter) {
lastIterClose(&state->pLastIter);
}
*ppRow = &state->row;
--state->iRow;
@ -2214,19 +2222,6 @@ _err:
return code;
}
int32_t clearNextRowFromFS(void *iter) {
int32_t code = 0;
SFSNextRowIter *state = (SFSNextRowIter *)iter;
if (!state) {
return code;
}
clearLastFileSet(state);
return code;
}
typedef enum SMEMNEXTROWSTATES {
SMEMNEXTROW_ENTER,
SMEMNEXTROW_NEXT,
@ -2346,6 +2341,36 @@ typedef struct CacheNextRowIter {
STsdb *pTsdb;
} CacheNextRowIter;
int32_t clearNextRowFromFS(void *iter) {
int32_t code = 0;
SFSNextRowIter *state = (SFSNextRowIter *)iter;
if (!state) {
return code;
}
if (state->pLastIter) {
lastIterClose(&state->pLastIter);
}
if (state->pBlockData) {
tBlockDataDestroy(state->pBlockData);
state->pBlockData = NULL;
}
if (state->pTSRow) {
taosMemoryFree(state->pTSRow);
state->pTSRow = NULL;
}
if (state->pRowIter->pSkyline) {
taosArrayDestroy(state->pRowIter->pSkyline);
state->pRowIter->pSkyline = NULL;
}
return code;
}
static void clearLastFileSet(SFSNextRowIter *state) {
if (state->pLastIter) {
lastIterClose(&state->pLastIter);
@ -2359,6 +2384,8 @@ static void clearLastFileSet(SFSNextRowIter *state) {
if (state->pr->pFileReader) {
tsdbDataFileReaderClose(&state->pr->pFileReader);
state->pr->pFileReader = NULL;
state->pr->pCurFileSet = NULL;
}
if (state->pTSRow) {

View File

@ -189,6 +189,10 @@ static int32_t tsdbCommitTombData(SCommitter2 *committer) {
committer->ctx->maxKey = committer->ctx->maxKey + 1;
}
if (record->ekey > committer->ctx->maxKey) {
committer->ctx->nextKey = committer->ctx->maxKey;
}
record->skey = TMAX(record->skey, committer->ctx->minKey);
record->ekey = TMIN(record->ekey, committer->ctx->maxKey);

View File

@ -112,7 +112,10 @@ static char* getFileNamePrefix(STsdb *pTsdb, SDiskID did, int32_t fid, uint64_t
p += titoa(TD_VID(pTsdb->pVnode), 10, p);
*(p++) = 'f';
p += titoa(fid, 10, p);
if (fid < 0) {
*(p++) = '-';
}
p += titoa((fid < 0) ? -fid : fid, 10, p);
memcpy(p, "ver", 3);
p += 3;
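titoa only accepts an unsigned value, so passing a negative fid straight through would be converted modulo 2^64; the fix above emits the sign explicitly and converts the magnitude. A standalone illustration of the difference (printf stands in for the real buffer writes):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t fid = -3;
  /* What an unsigned-only conversion would have produced: */
  printf("wrapped: %" PRIu64 "\n", (uint64_t)(int64_t)fid);  /* 18446744073709551613 */
  /* What the fixed code produces: sign first, then the magnitude. */
  printf("fixed:   %s%" PRIu64 "\n", (fid < 0) ? "-" : "",
         (uint64_t)((fid < 0) ? -(int64_t)fid : fid));       /* -3 */
  return 0;
}
```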

View File

@ -794,7 +794,7 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) {
// open stt file reader if not
if (pSttFileReader == NULL) {
SSttFileReaderConfig conf = {.tsdb = pConf->pTsdb, .szPage = pConf->pTsdb->pVnode->config.szPage};
SSttFileReaderConfig conf = {.tsdb = pConf->pTsdb, .szPage = pConf->pTsdb->pVnode->config.tsdbPageSize};
conf.file[0] = *pSttLevel->fobjArr->data[i]->f;
code = tsdbSttFileReaderOpen(pSttLevel->fobjArr->data[i]->fname, &conf, &pSttFileReader);

View File

@ -189,7 +189,7 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
STFileObj** pFileObj = pReader->status.pCurrentFileset->farr;
if (pFileObj[0] != NULL || pFileObj[3] != NULL) {
SDataFileReaderConfig conf = {.tsdb = pReader->pTsdb, .szPage = pReader->pTsdb->pVnode->config.szPage};
SDataFileReaderConfig conf = {.tsdb = pReader->pTsdb, .szPage = pReader->pTsdb->pVnode->config.tsdbPageSize};
const char* filesName[4] = {0};

View File

@ -749,7 +749,7 @@ int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
int64_t size;
TdFilePtr pOutFD = NULL;
TdFilePtr PInFD = NULL;
int32_t szPage = pTsdb->pVnode->config.szPage;
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
char fNameFrom[TSDB_FILENAME_LEN];
char fNameTo[TSDB_FILENAME_LEN];

View File

@ -2881,15 +2881,23 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t tableStartIdx = pInfo->tableStartIndex;
int32_t tableEndIdx = pInfo->tableEndIndex;
bool hasLimit = pInfo->limitInfo.limit.limit != -1 || pInfo->limitInfo.limit.offset != -1;
int64_t mergeLimit = -1;
if (hasLimit) {
mergeLimit = pInfo->limitInfo.limit.limit + pInfo->limitInfo.limit.offset;
}
size_t szRow = blockDataGetRowSize(pInfo->pResBlock);
if (hasLimit) {
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1,
NULL, pTaskInfo->id.str, mergeLimit, szRow+8, tsPQSortMemThreshold * 1024* 1024);
} else {
pInfo->sortBufSize = 2048 * pInfo->bufPageSize;
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0);
int64_t mergeLimit = -1;
if (pInfo->limitInfo.limit.limit != -1 || pInfo->limitInfo.limit.offset != -1) {
mergeLimit = pInfo->limitInfo.limit.limit + pInfo->limitInfo.limit.offset;
}
tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit);
}
tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockImpl, NULL, NULL);
// one table has one data block // one table has one data block

View File

@ -54,13 +54,6 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
int32_t numOfCols = 0;
pOperator->exprSupp.pExprInfo = createExprInfo(pSortNode->pExprs, NULL, &numOfCols);
pOperator->exprSupp.numOfExprs = numOfCols;
calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys);
pInfo->maxRows = -1;
if (pSortNode->node.pLimit) {
SLimitNode* pLimit = (SLimitNode*)pSortNode->node.pLimit;
if (pLimit->limit > 0) pInfo->maxRows = pLimit->limit;
}
int32_t numOfOutputCols = 0;
int32_t code =
extractColMatchInfo(pSortNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID, &pInfo->matchInfo);
@ -68,6 +61,13 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
goto _error;
}
calcSortOperMaxTupleLength(pInfo, pSortNode->pSortKeys);
pInfo->maxRows = -1;
if (pSortNode->node.pLimit) {
SLimitNode* pLimit = (SLimitNode*)pSortNode->node.pLimit;
if (pLimit->limit > 0) pInfo->maxRows = pLimit->limit + pLimit->offset;
}
pOperator->exprSupp.pCtx =
createSqlFunctionCtx(pOperator->exprSupp.pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
initResultSizeInfo(&pOperator->resultInfo, 1024);

View File

@ -51,6 +51,7 @@ struct SSortHandle {
uint32_t tmpRowIdx;
int64_t mergeLimit;
int64_t currMergeLimitTs;
int32_t sourceId;
SSDataBlock* pDataBlock;
@ -921,7 +922,8 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
int32_t nMergedRows = 0;
bool mergeLimitReached = false;
size_t blkPgSz = pgHeaderSz;
int64_t lastPageBufTs = (order->order == TSDB_ORDER_ASC) ? INT64_MAX : INT64_MIN;
int64_t currTs = (order->order == TSDB_ORDER_ASC) ? INT64_MAX : INT64_MIN;
while (nRows < totalRows) {
int32_t minIdx = tMergeTreeGetChosenIndex(pTree);
SSDataBlock* minBlk = taosArrayGetP(aBlk, minIdx);
@ -929,14 +931,21 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
int32_t bufInc = getPageBufIncForRow(minBlk, minRow, pHandle->pDataBlock->info.rows);
if (blkPgSz <= pHandle->pageSize && blkPgSz + bufInc > pHandle->pageSize) {
SColumnInfoData* tsCol = taosArrayGet(pHandle->pDataBlock->pDataBlock, order->slotId);
lastPageBufTs = ((int64_t*)tsCol->pData)[pHandle->pDataBlock->info.rows - 1];
appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId);
nMergedRows += pHandle->pDataBlock->info.rows;
blockDataCleanup(pHandle->pDataBlock);
blkPgSz = pgHeaderSz;
bufInc = getPageBufIncForRow(minBlk, minRow, 0);
if ((pHandle->mergeLimit != -1) && (nMergedRows >= pHandle->mergeLimit)) {
mergeLimitReached = true;
if ((lastPageBufTs < pHandle->currMergeLimitTs && order->order == TSDB_ORDER_ASC) ||
(lastPageBufTs > pHandle->currMergeLimitTs && order->order == TSDB_ORDER_DESC)) {
pHandle->currMergeLimitTs = lastPageBufTs;
}
break;
}
}
@ -955,8 +964,17 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
}
if (pHandle->pDataBlock->info.rows > 0) {
if (!mergeLimitReached) {
SColumnInfoData* tsCol = taosArrayGet(pHandle->pDataBlock->pDataBlock, order->slotId);
lastPageBufTs = ((int64_t*)tsCol->pData)[pHandle->pDataBlock->info.rows - 1];
appendDataBlockToPageBuf(pHandle, pHandle->pDataBlock, aPgId);
nMergedRows += pHandle->pDataBlock->info.rows;
if ((pHandle->mergeLimit != -1) && (nMergedRows >= pHandle->mergeLimit)) {
mergeLimitReached = true;
if ((lastPageBufTs < pHandle->currMergeLimitTs && order->order == TSDB_ORDER_ASC) ||
(lastPageBufTs > pHandle->currMergeLimitTs && order->order == TSDB_ORDER_DESC)) {
pHandle->currMergeLimitTs = lastPageBufTs;
}
}
}
blockDataCleanup(pHandle->pDataBlock);
}
@ -982,11 +1000,24 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
SSortSource* pSrc = taosArrayGetP(pHandle->pOrderedSource, 0);
int32_t szSort = 0;
if (pOrder->order == TSDB_ORDER_ASC) {
pHandle->currMergeLimitTs = INT64_MAX;
} else {
pHandle->currMergeLimitTs = INT64_MIN;
}
SArray* aBlkSort = taosArrayInit(8, POINTER_BYTES);
SSHashObj* mUidBlk = tSimpleHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT));
while (1) {
SSDataBlock* pBlk = pHandle->fetchfp(pSrc->param);
if (pBlk != NULL) {
SColumnInfoData* tsCol = taosArrayGet(pBlk->pDataBlock, pOrder->slotId);
int64_t firstRowTs = *(int64_t*)tsCol->pData;
if ((pOrder->order == TSDB_ORDER_ASC && firstRowTs > pHandle->currMergeLimitTs) ||
(pOrder->order == TSDB_ORDER_DESC && firstRowTs < pHandle->currMergeLimitTs)) {
continue;
}
}
if (pBlk != NULL) {
szSort += blockDataGetSize(pBlk);
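The new currMergeLimitTs records the last timestamp flushed when the merge limit is hit, and the fetch loop above now drops any block whose first timestamp already lies beyond that boundary, since none of its rows can reach the limited result. The skip test reduces to a small predicate; this is a sketch, not the actual TDengine code:

```c
#include <stdbool.h>
#include <stdint.h>

/* Returns true when a freshly fetched block can be discarded outright:
 * ascending order  -> its first ts is already past the limit boundary,
 * descending order -> its first ts is already before the boundary. */
static bool can_skip_block(bool ascending, int64_t firstRowTs, int64_t currMergeLimitTs) {
  return (ascending && firstRowTs > currMergeLimitTs) ||
         (!ascending && firstRowTs < currMergeLimitTs);
}
```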
@ -1374,6 +1405,9 @@ static int32_t tsortOpenForPQSort(SSortHandle* pHandle) {
}
static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
if (pHandle->pDataBlock == NULL) { // when no input stream datablock
return NULL;
}
blockDataCleanup(pHandle->pDataBlock);
blockDataEnsureCapacity(pHandle->pDataBlock, 1);
// abandon the top tuple if queue size bigger than max size

View File

@ -547,7 +547,7 @@ void monSendReport() {
monGenGrantJson(pMonitor);
monGenDnodeJson(pMonitor);
monGenDiskJson(pMonitor);
monGenLogJson(pMonitor);
//monGenLogJson(pMonitor); // TS-3691
char *pCont = tjsonToString(pMonitor->pJson);
// uDebugL("report cont:%s\n", pCont);

View File

@ -2934,14 +2934,14 @@ static int32_t createMultiResFuncsFromStar(STranslateContext* pCxt, SFunctionNod
static int32_t createTags(STranslateContext* pCxt, SNodeList** pOutput) {
if (QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_PC,
"The _TAGS pseudo column can only be used for subtable and supertable queries");
"The _TAGS pseudo column can only be used for child table and super table queries");
}
SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pCxt->pCurrStmt)->pFromTable);
const STableMeta* pMeta = pTable->pMeta;
if (TSDB_SUPER_TABLE != pMeta->tableType && TSDB_CHILD_TABLE != pMeta->tableType) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_PC,
"The _TAGS pseudo column can only be used for subtable and supertable queries");
"The _TAGS pseudo column can only be used for child table and super table queries");
}
SSchema* pTagsSchema = getTableTagSchema(pMeta);

View File

@ -164,6 +164,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "%s function is not supported in fill query"; return "%s function is not supported in fill query";
case TSDB_CODE_PAR_INVALID_WINDOW_PC: case TSDB_CODE_PAR_INVALID_WINDOW_PC:
return "_WSTART, _WEND and _WDURATION can only be used in window query"; return "_WSTART, _WEND and _WDURATION can only be used in window query";
case TSDB_CODE_PAR_INVALID_TAGS_PC:
return "Tags can only applied to super table and child table";
case TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC: case TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC:
return "%s function is not supported in time window query"; return "%s function is not supported in time window query";
case TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC: case TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC:

View File

@ -687,6 +687,9 @@ int32_t streamTryExec(SStreamTask* pTask) {
streamSchedExec(pTask);
}
}
} else {
qDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", pTask->id.idStr,
streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus);
}
return 0;

View File

@ -624,9 +624,8 @@ int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask) {
return qStreamRecoverSetAllStepFinished(exec);
}
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask) {
void streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) {
SVersionRange* pRange = &pTask->dataRange.range;
int64_t latestVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
ASSERT(latestVer >= pRange->maxVer);
int64_t nextStartVer = pRange->maxVer + 1;

View File

@ -961,6 +961,18 @@ char *taosGetCmdlineByPID(int pid) {
#endif
}
int64_t taosGetOsUptime() {
#ifdef WINDOWS
#elif defined(_TD_DARWIN_64)
#else
struct sysinfo info;
if (0 == sysinfo(&info)) {
return (int64_t)info.uptime * 1000;
}
#endif
return 0;
}
void taosSetCoreDump(bool enable) {
if (!enable) return;
#ifdef WINDOWS

View File

@ -351,10 +351,10 @@ int32_t titoa(uint64_t val, size_t radix, char str[]) {
int32_t i = 0;
uint64_t v = val;
while(v > 0) {
do {
buf[i++] = s[v % radix];
v /= radix;
}
} while (v > 0);
// reverse order
for(int32_t j = 0; j < i; ++j) {
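Switching the conversion loop from while to do-while makes titoa emit at least one digit, so a value of 0 now yields "0" instead of an empty string. A minimal standalone sketch of the fixed loop, with the radix assumed to be at most 16:

```c
#include <stdint.h>
#include <stdio.h>

static int32_t itoa_sketch(uint64_t val, uint64_t radix, char str[]) {
  const char *digits = "0123456789abcdef";
  char buf[65];
  int32_t i = 0;
  uint64_t v = val;
  do {                      /* the old `while (v > 0)` loop skipped val == 0 entirely */
    buf[i++] = digits[v % radix];
    v /= radix;
  } while (v > 0);
  for (int32_t j = 0; j < i; ++j) str[j] = buf[i - 1 - j];  /* reverse order */
  str[i] = '\0';
  return i;
}

int main(void) {
  char out[65];
  itoa_sketch(0, 10, out);
  printf("%s\n", out);  /* prints "0"; the old loop produced an empty string */
  return 0;
}
```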

View File

@ -186,33 +186,6 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
tdLog.exit("total is null!") tdLog.exit("total is null!")
# log_infos ====================================
if "log_infos" not in infoDict or infoDict["log_infos"]== None:
tdLog.exit("log_infos is null!")
if "logs" not in infoDict["log_infos"] or len(infoDict["log_infos"]["logs"]) < 8:#!= 10:
tdLog.exit("logs is null!")
if "ts" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 10:
tdLog.exit("ts is null!")
if "level" not in infoDict["log_infos"]["logs"][0] or infoDict["log_infos"]["logs"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
if "content" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 1:
tdLog.exit("content is null!")
if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4:
tdLog.exit("summary is null!")
if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 :
tdLog.exit("total is null!")
if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
def do_GET(self):
"""
process GET request
@ -315,4 +288,3 @@ class TDTestCase:
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())