Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/vnode_refact1
This commit is contained in commit 04e690b50b.
@@ -15,12 +15,12 @@ MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
 
 find_package(Git QUIET)
-if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
+if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
 # Update submodules as needed
     option(GIT_SUBMODULE "Check submodules during build" ON)
     if(GIT_SUBMODULE)
         message(STATUS "Submodule update")
-        execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive
+        execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive
                         WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
                         RESULT_VARIABLE GIT_SUBMOD_RESULT)
         if(NOT GIT_SUBMOD_RESULT EQUAL "0")

@@ -29,7 +29,7 @@ if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
         endif()
     endif()
 
-    if(NOT EXISTS "${PROJECT_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
+    if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
         message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.")
     endif()

@@ -66,7 +66,7 @@ ENDIF ()
 IF (TD_WINDOWS)
     MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
     SET(COMMON_FLAGS "/W3 /D_WIN32")
-
+    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
     # IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
    #     SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
    # ENDIF ()

@@ -23,7 +23,6 @@ extern "C" {
 #define TDENGINE_SYSTABLE_H
 
 #define TSDB_INFORMATION_SCHEMA_DB "information_schema"
-#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
 #define TSDB_INS_TABLE_DNODES "dnodes"
 #define TSDB_INS_TABLE_MNODES "mnodes"
 #define TSDB_INS_TABLE_MODULES "modules"

@@ -44,27 +43,27 @@ extern "C" {
 #define TSDB_INS_TABLE_VNODES "vnodes"
 #define TSDB_INS_TABLE_CONFIGS "configs"
 
-#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
-#define TSDB_PERFS_TABLE_SMAS "smas"
-#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes"
-#define TSDB_PERFS_TABLE_CONNECTIONS "connections"
-#define TSDB_PERFS_TABLE_QUERIES "queries"
-#define TSDB_PERFS_TABLE_TOPICS "topics"
-#define TSDB_PERFS_TABLE_CONSUMERS "consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
-#define TSDB_PERFS_TABLE_OFFSETS "offsets"
-#define TSDB_PERFS_TABLE_TRANS "trans"
-#define TSDB_PERFS_TABLE_STREAMS "streams"
+#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
+#define TSDB_PERFS_TABLE_SMAS "smas"
+#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes"
+#define TSDB_PERFS_TABLE_CONNECTIONS "connections"
+#define TSDB_PERFS_TABLE_QUERIES "queries"
+#define TSDB_PERFS_TABLE_TOPICS "topics"
+#define TSDB_PERFS_TABLE_CONSUMERS "consumers"
+#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
+#define TSDB_PERFS_TABLE_OFFSETS "offsets"
+#define TSDB_PERFS_TABLE_TRANS "trans"
+#define TSDB_PERFS_TABLE_STREAMS "streams"
 
 typedef struct SSysDbTableSchema {
-  const char *name;
+  const char* name;
   const int32_t type;
   const int32_t bytes;
 } SSysDbTableSchema;
 
 typedef struct SSysTableMeta {
-  const char *name;
-  const SSysDbTableSchema *schema;
+  const char* name;
+  const SSysDbTableSchema* schema;
   const int32_t colNum;
 } SSysTableMeta;

@@ -17,8 +17,8 @@
 #define _TD_COMMON_GLOBAL_H_
 
-#include "tarray.h"
-#include "tdef.h"
+#include "tconfig.h"
+#include "tdef.h"
 
 #ifdef __cplusplus
 extern "C" {

@@ -121,15 +121,16 @@ extern char tsCompressor[];
 extern int32_t tsDiskCfgNum;
 extern SDiskCfg tsDiskCfg[];
 
-// internal
-extern int32_t tsTransPullupMs;
-extern int32_t tsMaRebalanceMs;
+// internal
+extern int32_t tsTransPullupInterval;
+extern int32_t tsMqRebalanceInterval;
 
 #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
 
-int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, const char *envFile,
-                      char *apolloUrl, SArray *pArgs, bool tsc);
-int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc);
+int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
+                      const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc);
+int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs,
+                    bool tsc);
 void taosCleanupCfg();
 void taosCfgDynamicOptions(const char *option, const char *value);
 void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);

@@ -204,7 +204,6 @@ enum {
 TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
 
-// sync integration
 TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING_REPLY, "vnode-sync-ping-reply", NULL, NULL)

@@ -411,7 +411,6 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909)
 #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A)
 
-// sync integration
 #define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x0910)
 #define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)

@@ -211,6 +211,7 @@ static const SSysDbTableSchema transSchema[] = {
 {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
 {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+{.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
 {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
 {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
 {.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
 };

@@ -170,8 +170,8 @@ uint32_t tsCurRange = 100;  // range
 char tsCompressor[32] = "ZSTD_COMPRESSOR";  // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
 
 // internal
-int32_t tsTransPullupMs = 6000;
-int32_t tsMaRebalanceMs = 2000;
+int32_t tsTransPullupInterval = 6;
+int32_t tsMqRebalanceInterval = 2;
 
 void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
   tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);

@@ -438,6 +438,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1;
   if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1;
 
+  if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
+  if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
+
   return 0;
 }

@@ -575,6 +578,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
   tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
   tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
 
+  tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
+  tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
+
   if (tsQueryBufferSize >= 0) {
     tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
   }

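Note: the two new options follow this file's register-then-read pattern: taosAddServerCfg registers each setting with a default and a valid range, and taosSetServerCfg later copies the parsed value back into the global. A minimal self-contained sketch of that two-step pattern (toy table, not the real SConfig API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy config table, for illustration only; the real SConfig API differs. */
    typedef struct { const char *name; int32_t val, lo, hi; } CfgItem;
    static CfgItem gItems[16];
    static int gCount = 0;

    /* Step 1: register the option with its default and valid range. */
    static int cfgAdd(const char *name, int32_t def, int32_t lo, int32_t hi) {
      if (def < lo || def > hi || gCount >= 16) return -1;
      gItems[gCount++] = (CfgItem){name, def, lo, hi};
      return 0;
    }

    /* Step 2: after files/env are parsed, read the value back into the global. */
    static int32_t cfgGet(const char *name) {
      for (int i = 0; i < gCount; ++i)
        if (strcmp(gItems[i].name, name) == 0) return gItems[i].val;
      return -1;
    }

    int32_t tsTransPullupInterval = 6;

    int main(void) {
      if (cfgAdd("transPullupInterval", tsTransPullupInterval, 1, 10000) != 0) return 1;
      tsTransPullupInterval = cfgGet("transPullupInterval");
      printf("transPullupInterval=%d\n", tsTransPullupInterval);
      return 0;
    }
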
@@ -51,7 +51,7 @@ int32_t dmReadEps(SDnode *pDnode) {
   pDnode->data.dnodeEps = taosArrayInit(1, sizeof(SDnodeEp));
   if (pDnode->data.dnodeEps == NULL) {
     dError("failed to calloc dnodeEp array since %s", strerror(errno));
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   snprintf(file, sizeof(file), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP);

@@ -59,53 +59,53 @@ int32_t dmReadEps(SDnode *pDnode) {
   if (pFile == NULL) {
     // dDebug("file %s not exist", file);
     code = 0;
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   len = (int32_t)taosReadFile(pFile, content, maxLen);
   if (len <= 0) {
     dError("failed to read %s since content is null", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   content[len] = 0;
   root = cJSON_Parse(content);
   if (root == NULL) {
     dError("failed to read %s since invalid json format", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId");
   if (!dnodeId || dnodeId->type != cJSON_Number) {
     dError("failed to read %s since dnodeId not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
   pDnode->data.dnodeId = dnodeId->valueint;
 
   cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
   if (!clusterId || clusterId->type != cJSON_String) {
     dError("failed to read %s since clusterId not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
   pDnode->data.clusterId = atoll(clusterId->valuestring);
 
   cJSON *dropped = cJSON_GetObjectItem(root, "dropped");
   if (!dropped || dropped->type != cJSON_Number) {
     dError("failed to read %s since dropped not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
   pDnode->data.dropped = dropped->valueint;
 
   cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes");
   if (!dnodes || dnodes->type != cJSON_Array) {
     dError("failed to read %s since dnodes not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   int32_t numOfDnodes = cJSON_GetArraySize(dnodes);
   if (numOfDnodes <= 0) {
     dError("failed to read %s since numOfDnodes:%d invalid", file, numOfDnodes);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   for (int32_t i = 0; i < numOfDnodes; ++i) {

@@ -117,7 +117,7 @@ int32_t dmReadEps(SDnode *pDnode) {
   cJSON *did = cJSON_GetObjectItem(node, "id");
   if (!did || did->type != cJSON_Number) {
     dError("failed to read %s since dnodeId not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   dnodeEp.id = did->valueint;

@@ -125,14 +125,14 @@ int32_t dmReadEps(SDnode *pDnode) {
   cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn");
   if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) {
     dError("failed to read %s since dnodeFqdn not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
   tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN);
 
   cJSON *dnodePort = cJSON_GetObjectItem(node, "port");
   if (!dnodePort || dnodePort->type != cJSON_Number) {
     dError("failed to read %s since dnodePort not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
 
   dnodeEp.ep.port = dnodePort->valueint;

@@ -140,7 +140,7 @@ int32_t dmReadEps(SDnode *pDnode) {
   cJSON *isMnode = cJSON_GetObjectItem(node, "isMnode");
   if (!isMnode || isMnode->type != cJSON_Number) {
     dError("failed to read %s since isMnode not found", file);
-    goto PRASE_DNODE_OVER;
+    goto _OVER;
   }
   dnodeEp.isMnode = isMnode->valueint;

@@ -151,7 +151,7 @@ int32_t dmReadEps(SDnode *pDnode) {
   dDebug("succcessed to read file %s", file);
   dmPrintEps(pDnode);
 
-PRASE_DNODE_OVER:
+_OVER:
   if (content != NULL) taosMemoryFree(content);
   if (root != NULL) cJSON_Delete(root);
   if (pFile != NULL) taosCloseFile(&pFile);

@@ -176,7 +176,7 @@ PRASE_DNODE_OVER:
 
 int32_t dmWriteEps(SDnode *pDnode) {
   char file[PATH_MAX] = {0};
-  char realfile[PATH_MAX];
+  char realfile[PATH_MAX] = {0};
   snprintf(file, sizeof(file), "%s%sdnode.json.bak", pDnode->wrappers[DNODE].path, TD_DIRSEP);
   snprintf(realfile, sizeof(realfile), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP);

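Note: the relabeling from PRASE_DNODE_OVER to _OVER keeps the single-exit cleanup idiom used throughout these readers; a minimal standalone sketch of the idiom (hypothetical loadConfig helper, not from the commit):

    #include <stdio.h>
    #include <stdlib.h>

    /* Goto-single-exit idiom: every failure path jumps to one label that
     * releases whatever was acquired, so cleanup lives in one place. */
    static int loadConfig(const char *path) {
      int   code = -1;
      char *buf = NULL;
      FILE *fp = fopen(path, "r");
      if (fp == NULL) goto _OVER;

      buf = malloc(4096);
      if (buf == NULL) goto _OVER;
      if (fread(buf, 1, 4095, fp) == 0) goto _OVER;

      code = 0;  /* success; fall through to the shared cleanup */

    _OVER:
      if (buf != NULL) free(buf);
      if (fp != NULL) fclose(fp);
      return code;
    }
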
@@ -241,7 +241,11 @@ static int32_t dmSpawnUdfd(SDnode *pDnode) {
     strncpy(path, tsProcPath, strlen(tsProcPath));
     taosDirName(path);
   }
+#ifdef WINDOWS
+  strcat(path, "udfd.exe");
+#else
   strcat(path, "/udfd");
+#endif
   char* argsUdfd[] = {path, "-c", configDir, NULL};
   options.args = argsUdfd;
   options.file = path;

@@ -105,12 +105,13 @@ void dmStopMonitorThread(SDnode *pDnode) {
 }
 
 static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
-  SDnode * pDnode = pInfo->ahandle;
-  SRpcMsg *pRpc = &pMsg->rpcMsg;
-  int32_t code = -1;
+  SDnode *pDnode = pInfo->ahandle;
+
+  int32_t code = -1;
+  tmsg_t msgType = pMsg->rpcMsg.msgType;
   dTrace("msg:%p, will be processed in dnode-mgmt queue", pMsg);
 
-  switch (pRpc->msgType) {
+  switch (msgType) {
     case TDMT_DND_CONFIG_DNODE:
       code = dmProcessConfigReq(pDnode, pMsg);
       break;

@@ -148,9 +149,14 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
       break;
   }
 
-  if (pRpc->msgType & 1u) {
-    if (code != 0) code = terrno;
-    SRpcMsg rsp = {.handle = pRpc->handle, .ahandle = pRpc->ahandle, .code = code, .refId = pRpc->refId};
+  if (msgType & 1u) {
+    if (code != 0 && terrno != 0) code = terrno;
+    SRpcMsg rsp = {
+        .handle = pMsg->rpcMsg.handle,
+        .ahandle = pMsg->rpcMsg.ahandle,
+        .code = code,
+        .refId = pMsg->rpcMsg.refId,
+    };
     rpcSendResponse(&rsp);
   }

@@ -160,7 +166,13 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
 }
 
 int32_t dmStartWorker(SDnode *pDnode) {
-  SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "dnode-mgmt", .fp = (FItem)dmProcessMgmtQueue, .param = pDnode};
+  SSingleWorkerCfg cfg = {
+      .min = 1,
+      .max = 1,
+      .name = "dnode-mgmt",
+      .fp = (FItem)dmProcessMgmtQueue,
+      .param = pDnode,
+  };
   if (tSingleWorkerInit(&pDnode->data.mgmtWorker, &cfg) != 0) {
     dError("failed to start dnode-mgmt worker since %s", terrstr());
     return -1;

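Note: many hunks in this commit expand one-line designated initializers into one-field-per-line form with a trailing comma; a small sketch of why that style is preferred (hypothetical SWorkerCfg type, not the project's):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical config struct, for illustration only. */
    typedef struct {
      int32_t     min;
      int32_t     max;
      const char *name;
    } SWorkerCfg;

    /* One field per line plus a trailing comma keeps future diffs to a single
     * line when a field is added, removed, or reordered. */
    static const SWorkerCfg gCfg = {
        .min = 1,
        .max = 4,
        .name = "demo-worker",
    };

    int main(void) {
      printf("%s: min=%d max=%d\n", gCfg.name, gCfg.min, gCfg.max);
      return 0;
    }
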
@@ -18,7 +18,11 @@
 static void bmSendErrorRsp(SNodeMsg *pMsg, int32_t code) {
   SRpcMsg rpcRsp = {
-      .handle = pMsg->rpcMsg.handle, .ahandle = pMsg->rpcMsg.ahandle, .code = code, .refId = pMsg->rpcMsg.refId};
+      .handle = pMsg->rpcMsg.handle,
+      .ahandle = pMsg->rpcMsg.ahandle,
+      .code = code,
+      .refId = pMsg->rpcMsg.refId,
+  };
   tmsgSendRsp(&rpcRsp);
 
   dTrace("msg:%p, is freed", pMsg);

@@ -103,7 +107,7 @@ static void bmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
 }
 
 int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SBnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SBnodeMgmt *pMgmt = pWrapper->pMgmt;
   SMultiWorker *pWorker = &pMgmt->writeWorker;
 
   dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);

@@ -112,7 +116,7 @@ int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
 }
 
 int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SBnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SBnodeMgmt *pMgmt = pWrapper->pMgmt;
   SSingleWorker *pWorker = &pMgmt->monitorWorker;
 
   dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);

@@ -121,7 +125,12 @@ int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
 }
 
 int32_t bmStartWorker(SBnodeMgmt *pMgmt) {
-  SMultiWorkerCfg cfg = {.max = 1, .name = "bnode-write", .fp = (FItems)bmProcessWriteQueue, .param = pMgmt};
+  SMultiWorkerCfg cfg = {
+      .max = 1,
+      .name = "bnode-write",
+      .fp = (FItems)bmProcessWriteQueue,
+      .param = pMgmt,
+  };
   if (tMultiWorkerInit(&pMgmt->writeWorker, &cfg) != 0) {
     dError("failed to start bnode-write worker since %s", terrstr());
     return -1;

@@ -129,7 +138,12 @@ int32_t bmStartWorker(SBnodeMgmt *pMgmt) {
 
   if (tsMultiProcess) {
     SSingleWorkerCfg mCfg = {
-        .min = 1, .max = 1, .name = "bnode-monitor", .fp = (FItem)bmProcessMonitorQueue, .param = pMgmt};
+        .min = 1,
+        .max = 1,
+        .name = "bnode-monitor",
+        .fp = (FItem)bmProcessMonitorQueue,
+        .param = pMgmt,
+    };
     if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
       dError("failed to start bnode-monitor worker since %s", terrstr());
       return -1;

@@ -22,7 +22,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
   int32_t maxLen = 4096;
   char *content = taosMemoryCalloc(1, maxLen + 1);
   cJSON *root = NULL;
-  char file[PATH_MAX];
+  char file[PATH_MAX] = {0};
   TdFilePtr pFile = NULL;
 
   snprintf(file, sizeof(file), "%s%smnode.json", pMgmt->path, TD_DIRSEP);

@@ -30,39 +30,39 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
   if (pFile == NULL) {
     // dDebug("file %s not exist", file);
     code = 0;
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
 
   len = (int32_t)taosReadFile(pFile, content, maxLen);
   if (len <= 0) {
     dError("failed to read %s since content is null", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
 
   content[len] = 0;
   root = cJSON_Parse(content);
   if (root == NULL) {
     dError("failed to read %s since invalid json format", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
 
   cJSON *deployed = cJSON_GetObjectItem(root, "deployed");
   if (!deployed || deployed->type != cJSON_Number) {
     dError("failed to read %s since deployed not found", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
   *pDeployed = deployed->valueint;
 
   cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes");
   if (!mnodes || mnodes->type != cJSON_Array) {
     dError("failed to read %s since nodes not found", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
 
   pMgmt->replica = cJSON_GetArraySize(mnodes);
   if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) {
     dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
 
   for (int32_t i = 0; i < pMgmt->replica; ++i) {

@@ -74,21 +74,21 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
   cJSON *id = cJSON_GetObjectItem(node, "id");
   if (!id || id->type != cJSON_Number) {
     dError("failed to read %s since id not found", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
   pReplica->id = id->valueint;
 
   cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn");
   if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) {
     dError("failed to read %s since fqdn not found", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
   tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN);
 
   cJSON *port = cJSON_GetObjectItem(node, "port");
   if (!port || port->type != cJSON_Number) {
     dError("failed to read %s since port not found", file);
-    goto PRASE_MNODE_OVER;
+    goto _OVER;
   }
   pReplica->port = port->valueint;
 }

@@ -96,7 +96,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) {
   code = 0;
   dDebug("succcessed to read file %s, deployed:%d", file, *pDeployed);
 
-PRASE_MNODE_OVER:
+_OVER:
   if (content != NULL) taosMemoryFree(content);
   if (root != NULL) cJSON_Delete(root);
   if (pFile != NULL) taosCloseFile(&pFile);

@@ -161,9 +161,7 @@ static int32_t mmOpen(SMgmtWrapper *pWrapper) {
   SMnodeOpt option = {0};
   if (!deployed) {
     dInfo("mnode start to deploy");
-    // if (pWrapper->procType == DND_PROC_CHILD) {
-    pWrapper->pDnode->data.dnodeId = 1;
-    // }
+    pWrapper->pDnode->data.dnodeId = 1;
     mmBuildOptionForDeploy(pMgmt, &option);
   } else {
     dInfo("mnode start to open");

@@ -17,42 +17,48 @@
 #include "mmInt.h"
 
 static inline void mmSendRsp(SNodeMsg *pMsg, int32_t code) {
-  SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
-                 .ahandle = pMsg->rpcMsg.ahandle,
-                 .refId = pMsg->rpcMsg.refId,
-                 .code = code,
-                 .pCont = pMsg->pRsp,
-                 .contLen = pMsg->rspLen};
+  SRpcMsg rsp = {
+      .handle = pMsg->rpcMsg.handle,
+      .ahandle = pMsg->rpcMsg.ahandle,
+      .refId = pMsg->rpcMsg.refId,
+      .code = code,
+      .pCont = pMsg->pRsp,
+      .contLen = pMsg->rspLen,
+  };
   tmsgSendRsp(&rsp);
 }
 
 static void mmProcessQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pInfo->ahandle;
+
+  int32_t code = -1;
+  tmsg_t msgType = pMsg->rpcMsg.msgType;
   dTrace("msg:%p, get from mnode queue", pMsg);
-  SRpcMsg *pRpc = &pMsg->rpcMsg;
-  int32_t code = -1;
 
-  if (pMsg->rpcMsg.msgType == TDMT_DND_ALTER_MNODE) {
-    code = mmProcessAlterReq(pMgmt, pMsg);
-  } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_INFO) {
-    code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg);
-  } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_LOAD) {
-    code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg);
-  } else {
-    pMsg->pNode = pMgmt->pMnode;
-    code = mndProcessMsg(pMsg);
+  switch (msgType) {
+    case TDMT_DND_ALTER_MNODE:
+      code = mmProcessAlterReq(pMgmt, pMsg);
+      break;
+    case TDMT_MON_MM_INFO:
+      code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg);
+      break;
+    case TDMT_MON_MM_LOAD:
+      code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg);
+      break;
+    default:
+      pMsg->pNode = pMgmt->pMnode;
+      code = mndProcessMsg(pMsg);
   }
 
-  if (pRpc->msgType & 1U) {
-    if (pRpc->handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
+  if (msgType & 1U) {
+    if (pMsg->rpcMsg.handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
       if (code != 0 && terrno != 0) code = terrno;
       mmSendRsp(pMsg, code);
     }
   }
 
   dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code));
-  rpcFreeCont(pRpc->pCont);
+  rpcFreeCont(pMsg->rpcMsg.pCont);
   taosFreeQitem(pMsg);
 }

@@ -78,38 +84,38 @@ static void mmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) {
   taosFreeQitem(pMsg);
 }
 
-static void mmPutMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) {
+static void mmPutNodeMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) {
   dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
   taosWriteQitem(pWorker->queue, pMsg);
 }
 
 int32_t mmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pWrapper->pMgmt;
-  mmPutMsgToWorker(&pMgmt->writeWorker, pMsg);
+  mmPutNodeMsgToWorker(&pMgmt->writeWorker, pMsg);
   return 0;
 }
 
 int32_t mmProcessSyncMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pWrapper->pMgmt;
-  mmPutMsgToWorker(&pMgmt->syncWorker, pMsg);
+  mmPutNodeMsgToWorker(&pMgmt->syncWorker, pMsg);
   return 0;
 }
 
 int32_t mmProcessReadMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pWrapper->pMgmt;
-  mmPutMsgToWorker(&pMgmt->readWorker, pMsg);
+  mmPutNodeMsgToWorker(&pMgmt->readWorker, pMsg);
   return 0;
 }
 
 int32_t mmProcessQueryMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pWrapper->pMgmt;
-  mmPutMsgToWorker(&pMgmt->queryWorker, pMsg);
+  mmPutNodeMsgToWorker(&pMgmt->queryWorker, pMsg);
   return 0;
 }
 
 int32_t mmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
   SMnodeMgmt *pMgmt = pWrapper->pMgmt;
-  mmPutMsgToWorker(&pMgmt->monitorWorker, pMsg);
+  mmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg);
   return 0;
 }

@@ -144,40 +150,62 @@ int32_t mmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) {
 }
 
 int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
-  SSingleWorkerCfg qCfg = {.min = tsNumOfMnodeQueryThreads,
-                           .max = tsNumOfMnodeQueryThreads,
-                           .name = "mnode-query",
-                           .fp = (FItem)mmProcessQueryQueue,
-                           .param = pMgmt};
+  SSingleWorkerCfg qCfg = {
+      .min = tsNumOfMnodeQueryThreads,
+      .max = tsNumOfMnodeQueryThreads,
+      .name = "mnode-query",
+      .fp = (FItem)mmProcessQueryQueue,
+      .param = pMgmt,
+  };
   if (tSingleWorkerInit(&pMgmt->queryWorker, &qCfg) != 0) {
     dError("failed to start mnode-query worker since %s", terrstr());
     return -1;
   }
 
-  SSingleWorkerCfg rCfg = {.min = tsNumOfMnodeReadThreads,
-                           .max = tsNumOfMnodeReadThreads,
-                           .name = "mnode-read",
-                           .fp = (FItem)mmProcessQueue,
-                           .param = pMgmt};
+  SSingleWorkerCfg rCfg = {
+      .min = tsNumOfMnodeReadThreads,
+      .max = tsNumOfMnodeReadThreads,
+      .name = "mnode-read",
+      .fp = (FItem)mmProcessQueue,
+      .param = pMgmt,
+  };
   if (tSingleWorkerInit(&pMgmt->readWorker, &rCfg) != 0) {
     dError("failed to start mnode-read worker since %s", terrstr());
     return -1;
   }
 
-  SSingleWorkerCfg wCfg = {.min = 1, .max = 1, .name = "mnode-write", .fp = (FItem)mmProcessQueue, .param = pMgmt};
+  SSingleWorkerCfg wCfg = {
+      .min = 1,
+      .max = 1,
+      .name = "mnode-write",
+      .fp = (FItem)mmProcessQueue,
+      .param = pMgmt,
+  };
   if (tSingleWorkerInit(&pMgmt->writeWorker, &wCfg) != 0) {
     dError("failed to start mnode-write worker since %s", terrstr());
     return -1;
   }
 
-  SSingleWorkerCfg sCfg = {.min = 1, .max = 1, .name = "mnode-sync", .fp = (FItem)mmProcessQueue, .param = pMgmt};
+  SSingleWorkerCfg sCfg = {
+      .min = 1,
+      .max = 1,
+      .name = "mnode-sync",
+      .fp = (FItem)mmProcessQueue,
+      .param = pMgmt,
+  };
   if (tSingleWorkerInit(&pMgmt->syncWorker, &sCfg) != 0) {
     dError("failed to start mnode mnode-sync worker since %s", terrstr());
     return -1;
   }
 
   if (tsMultiProcess) {
-    SSingleWorkerCfg mCfg = {.min = 1, .max = 1, .name = "mnode-monitor", .fp = (FItem)mmProcessQueue, .param = pMgmt};
+    SSingleWorkerCfg mCfg = {
+        .min = 1,
+        .max = 1,
+        .name = "mnode-monitor",
+        .fp = (FItem)mmProcessQueue,
+        .param = pMgmt,
+    };
     if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
       dError("failed to start mnode mnode-monitor worker since %s", terrstr());
       return -1;

@@ -17,12 +17,14 @@
 #include "qmInt.h"
 
 static inline void qmSendRsp(SNodeMsg *pMsg, int32_t code) {
-  SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
-                 .ahandle = pMsg->rpcMsg.ahandle,
-                 .refId = pMsg->rpcMsg.refId,
-                 .code = code,
-                 .pCont = pMsg->pRsp,
-                 .contLen = pMsg->rspLen};
+  SRpcMsg rsp = {
+      .handle = pMsg->rpcMsg.handle,
+      .ahandle = pMsg->rpcMsg.ahandle,
+      .refId = pMsg->rpcMsg.refId,
+      .code = code,
+      .pCont = pMsg->pRsp,
+      .contLen = pMsg->rspLen,
+  };
   tmsgSendRsp(&rsp);
 }

@@ -145,22 +147,26 @@ int32_t qmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) {
 }
 
 int32_t qmStartWorker(SQnodeMgmt *pMgmt) {
-  SSingleWorkerCfg queryCfg = {.min = tsNumOfVnodeQueryThreads,
-                               .max = tsNumOfVnodeQueryThreads,
-                               .name = "qnode-query",
-                               .fp = (FItem)qmProcessQueryQueue,
-                               .param = pMgmt};
+  SSingleWorkerCfg queryCfg = {
+      .min = tsNumOfVnodeQueryThreads,
+      .max = tsNumOfVnodeQueryThreads,
+      .name = "qnode-query",
+      .fp = (FItem)qmProcessQueryQueue,
+      .param = pMgmt,
+  };
 
   if (tSingleWorkerInit(&pMgmt->queryWorker, &queryCfg) != 0) {
     dError("failed to start qnode-query worker since %s", terrstr());
     return -1;
   }
 
-  SSingleWorkerCfg fetchCfg = {.min = tsNumOfQnodeFetchThreads,
-                               .max = tsNumOfQnodeFetchThreads,
-                               .name = "qnode-fetch",
-                               .fp = (FItem)qmProcessFetchQueue,
-                               .param = pMgmt};
+  SSingleWorkerCfg fetchCfg = {
+      .min = tsNumOfQnodeFetchThreads,
+      .max = tsNumOfQnodeFetchThreads,
+      .name = "qnode-fetch",
+      .fp = (FItem)qmProcessFetchQueue,
+      .param = pMgmt,
+  };
 
   if (tSingleWorkerInit(&pMgmt->fetchWorker, &fetchCfg) != 0) {
     dError("failed to start qnode-fetch worker since %s", terrstr());

@@ -169,7 +175,12 @@ int32_t qmStartWorker(SQnodeMgmt *pMgmt) {
 
   if (tsMultiProcess) {
     SSingleWorkerCfg mCfg = {
-        .min = 1, .max = 1, .name = "qnode-monitor", .fp = (FItem)qmProcessMonitorQueue, .param = pMgmt};
+        .min = 1,
+        .max = 1,
+        .name = "qnode-monitor",
+        .fp = (FItem)qmProcessMonitorQueue,
+        .param = pMgmt,
+    };
     if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
       dError("failed to start qnode-monitor worker since %s", terrstr());
       return -1;

@@ -17,12 +17,14 @@
 #include "smInt.h"
 
 static inline void smSendRsp(SNodeMsg *pMsg, int32_t code) {
-  SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle,
-                 .ahandle = pMsg->rpcMsg.ahandle,
-                 .refId = pMsg->rpcMsg.refId,
-                 .code = code,
-                 .pCont = pMsg->pRsp,
-                 .contLen = pMsg->rspLen};
+  SRpcMsg rsp = {
+      .handle = pMsg->rpcMsg.handle,
+      .ahandle = pMsg->rpcMsg.ahandle,
+      .refId = pMsg->rpcMsg.refId,
+      .code = code,
+      .pCont = pMsg->pRsp,
+      .contLen = pMsg->rspLen,
+  };
   tmsgSendRsp(&rsp);
 }

@@ -90,7 +92,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
     return -1;
   }
 
-  SMultiWorkerCfg cfg = {.max = 1, .name = "snode-unique", .fp = smProcessUniqueQueue, .param = pMgmt};
+  SMultiWorkerCfg cfg = {
+      .max = 1,
+      .name = "snode-unique",
+      .fp = smProcessUniqueQueue,
+      .param = pMgmt,
+  };
   if (tMultiWorkerInit(pUniqueWorker, &cfg) != 0) {
     dError("failed to start snode-unique worker since %s", terrstr());
     return -1;

@@ -101,11 +108,13 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
     }
   }
 
-  SSingleWorkerCfg cfg = {.min = tsNumOfSnodeSharedThreads,
-                          .max = tsNumOfSnodeSharedThreads,
-                          .name = "snode-shared",
-                          .fp = (FItem)smProcessSharedQueue,
-                          .param = pMgmt};
+  SSingleWorkerCfg cfg = {
+      .min = tsNumOfSnodeSharedThreads,
+      .max = tsNumOfSnodeSharedThreads,
+      .name = "snode-shared",
+      .fp = (FItem)smProcessSharedQueue,
+      .param = pMgmt,
+  };
 
   if (tSingleWorkerInit(&pMgmt->sharedWorker, &cfg)) {
     dError("failed to start snode shared-worker since %s", terrstr());

@@ -114,7 +123,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) {
 
   if (tsMultiProcess) {
     SSingleWorkerCfg mCfg = {
-        .min = 1, .max = 1, .name = "snode-monitor", .fp = (FItem)smProcessMonitorQueue, .param = pMgmt};
+        .min = 1,
+        .max = 1,
+        .name = "snode-monitor",
+        .fp = (FItem)smProcessMonitorQueue,
+        .param = pMgmt,
+    };
     if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) {
       dError("failed to start snode-monitor worker since %s", terrstr());
       return -1;

@@ -150,7 +164,7 @@ static FORCE_INLINE int32_t smGetSWTypeFromMsg(SRpcMsg *pMsg) {
 }
 
 int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SSnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SSnodeMgmt *pMgmt = pWrapper->pMgmt;
   SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, 0);
   if (pWorker == NULL) {
     terrno = TSDB_CODE_INVALID_MSG;

@@ -163,7 +177,7 @@ int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
 }
 
 int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SSnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SSnodeMgmt *pMgmt = pWrapper->pMgmt;
   SSingleWorker *pWorker = &pMgmt->monitorWorker;
 
   dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);

@@ -172,7 +186,7 @@ int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
 }
 
 int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SSnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SSnodeMgmt *pMgmt = pWrapper->pMgmt;
   int32_t index = smGetSWIdFromMsg(&pMsg->rpcMsg);
   SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, index);
   if (pWorker == NULL) {

@@ -186,7 +200,7 @@ int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
 }
 
 int32_t smProcessSharedMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) {
-  SSnodeMgmt * pMgmt = pWrapper->pMgmt;
+  SSnodeMgmt *pMgmt = pWrapper->pMgmt;
   SSingleWorker *pWorker = &pMgmt->sharedWorker;
 
   dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name);

@@ -335,7 +335,6 @@ static int32_t mndProcessStatusReq(SNodeMsg *pReq) {
   }
   bool roleChanged = false;
   for (int32_t vg = 0; vg < pVgroup->replica; ++vg) {
-    // sync integration
     if (pVgroup->vnodeGid[vg].dnodeId == statusReq.dnodeId) {
       if (pVgroup->vnodeGid[vg].role != pVload->syncState) {
         roleChanged = true;

@@ -1405,15 +1405,18 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB
   colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
 
+  char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0};
-  STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes);
+  STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes);
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)type, false);
 
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false);
 
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false);
 
+  char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0};
-  STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
+  STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes);
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, numOfRows, (const char *)lastError, false);

@@ -65,7 +65,7 @@ static void mndPullupTrans(void *param, void *tmrId) {
     tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
   }
 
-  taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer);
+  taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer);
 }
 
 static void mndCalMqRebalance(void *param, void *tmrId) {

@@ -81,7 +81,7 @@ static void mndCalMqRebalance(void *param, void *tmrId) {
     tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
   }
 
-  taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer);
+  taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer);
 }
 
 static void mndPullupTelem(void *param, void *tmrId) {

@@ -103,12 +103,12 @@ static int32_t mndInitTimer(SMnode *pMnode) {
     return -1;
   }
 
-  if (taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer)) {
+  if (taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer)) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
   }
 
-  if (taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer)) {
+  if (taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer)) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return -1;
   }

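Note: the renamed globals now store intervals in seconds, so every timer call site multiplies by 1000 before handing the value to the millisecond-based timer; a tiny sketch of the conversion (hypothetical resetTimerMs, standing in for taosTmrReset):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical millisecond-based timer API, for illustration only. */
    static void resetTimerMs(int32_t intervalMs) { printf("timer set to %d ms\n", intervalMs); }

    int32_t tsTransPullupInterval = 6;  /* configured in seconds */

    int main(void) {
      /* Configuration is expressed in seconds; the timer expects milliseconds. */
      resetTimerMs(tsTransPullupInterval * 1000);
      return 0;
    }
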
@@ -58,7 +58,7 @@ class MndTestTrans2 : public ::testing::Test {
   strcpy(opt.replicas[0].fqdn, "localhost");
   opt.msgCb = msgCb;
 
-  tsTransPullupMs = 1000;
+  tsTransPullupInterval = 1;
 
   const char *mnodepath = "/tmp/mnode_test_trans";
   taosRemoveDir(mnodepath);

@@ -310,7 +310,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
   code = taosFsyncFile(pFile);
   if (code != 0) {
     code = TAOS_SYSTEM_ERROR(errno);
-    mError("failed to write file:%s since %s", tmpfile, tstrerror(code));
+    mError("failed to sync file:%s since %s", tmpfile, tstrerror(code));
   }
 }

@@ -157,7 +157,7 @@ struct SVnodeCfg {
   bool isWeak;
   STsdbCfg tsdbCfg;
   SWalCfg walCfg;
-  SSyncCfg syncCfg;  // sync integration
+  SSyncCfg syncCfg;
   uint32_t hashBegin;
   uint32_t hashEnd;
   int8_t hashMethod;

@@ -879,14 +879,14 @@ static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order,
   }
 }
 
-static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, TDRowVerT maxVer) {
+static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow,
+                                 TDRowVerT maxVer) {
   STSRow *rmem = NULL, *rimem = NULL;
   if (pCheckInfo->iter) {
     SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
     if (node != NULL) {
       rmem = (STSRow*)SL_GET_NODE_DATA(node);
-#if 0  // TODO: skiplist refactor
+#if 0   // TODO: skiplist refactor
       if (TD_ROW_VER(rmem) > maxVer) {
         rmem = NULL;
       }

@@ -898,7 +898,7 @@ static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int
     SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
     if (node != NULL) {
       rimem = (STSRow*)SL_GET_NODE_DATA(node);
-#if 0  // TODO: skiplist refactor
+#if 0   // TODO: skiplist refactor
       if (TD_ROW_VER(rimem) > maxVer) {
         rimem = NULL;
       }

@@ -1677,7 +1677,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
     colIdOfRow2 = tdKvRowColIdAt(row2, k);
   }
 
-  if (colIdOfRow1 < colIdOfRow2) { // the most probability
+  if (colIdOfRow1 < colIdOfRow2) {  // the most probability
     if (colIdOfRow1 < pColInfo->info.colId) {
       ++j;
       continue;

@@ -1720,7 +1720,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
       ++(*curRow);
     }
     ++nResult;
-  } else if (update){
+  } else if (update) {
     mergeOption = 2;
   } else {
     mergeOption = 0;

@@ -1741,7 +1741,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa
       ++(*curRow);
     }
     ++nResult;
-  } else if(update) {
+  } else if (update) {
     mergeOption = 2;
   } else {
     mergeOption = 0;

@@ -1985,7 +1985,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
     return;
   } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) {
     SSkipListNode* node = NULL;
-    TSKEY lastRowKey = TSKEY_INITIAL_VAL;
+    TSKEY lastKeyAppend = TSKEY_INITIAL_VAL;
 
     do {
       STSRow* row2 = NULL;

@@ -2018,9 +2018,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
         rv2 = TD_ROW_SVER(row2);
       }
 
-      numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
-                                      pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey);
-      // numOfRows += 1;
+      numOfRows +=
+          mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
+                             pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
       if (cur->win.skey == TSKEY_INITIAL_VAL) {
         cur->win.skey = key;
       }

@@ -2028,7 +2028,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
       cur->win.ekey = key;
       cur->lastKey = key + step;
       cur->mixBlock = true;
-
       moveToNextRowInMem(pCheckInfo);
     } else if (key == tsArray[pos]) {  // data in buffer has the same timestamp of data in file block, ignore it
 #if 0

@@ -2064,7 +2063,11 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
       }
 #endif
       if (TD_SUPPORT_UPDATE(pCfg->update)) {
-        doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
+        if (lastKeyAppend != key) {
+          lastKeyAppend = key;
+          ++curRow;
+        }
+        numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos);
 
         if (rv1 != TD_ROW_SVER(row1)) {
           // pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));

@@ -2074,10 +2077,10 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
           // pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
           rv2 = TD_ROW_SVER(row2);
         }
-        numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
-                                        pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastRowKey);
-        // ++numOfRows;
+        numOfRows +=
+            mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
+                               pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
+
         if (cur->win.skey == TSKEY_INITIAL_VAL) {
           cur->win.skey = key;
         }

@@ -2118,11 +2121,19 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
       int32_t qstart = 0, qend = 0;
       getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend);
 
-      numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, qstart, qend);
+      if ((lastKeyAppend != TSKEY_INITIAL_VAL) &&
+          (lastKeyAppend != (ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qstart] : tsArray[qend]))) {
+        ++curRow;
+      }
+      numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend);
       pos += (qend - qstart + 1) * step;
+      if (numOfRows > 0) {
+        curRow = numOfRows - 1;
+      }
 
       cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qend] : tsArray[qstart];
       cur->lastKey = cur->win.ekey + step;
+      lastKeyAppend = cur->win.ekey;
     }
   } while (numOfRows < pTsdbReadHandle->outputCapacity);

@@ -2426,7 +2437,7 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi
   int32_t numOfTables = (int32_t)taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
 
   STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb);
-  STimeWindow win = TSWINDOW_INITIALIZER;
+  STimeWindow   win = TSWINDOW_INITIALIZER;
 
   while (true) {
     tsdbRLockFS(REPO_FS(pTsdbReadHandle->pTsdb));

@@ -2732,7 +2743,6 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
   STSchema* pSchema = NULL;
   TSKEY lastRowKey = TSKEY_INITIAL_VAL;
 
-
   do {
     STSRow* row = getSRowInTableMem(pCheckInfo, pTsdbReadHandle->order, pCfg->update, NULL, TD_VER_MAX);
     if (row == NULL) {

@@ -2757,8 +2767,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
       pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 0);
       rv = TD_ROW_SVER(row);
     }
-    numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, pSchema,
-                                    NULL, pCfg->update, &lastRowKey);
+    numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId,
+                                    pSchema, NULL, pCfg->update, &lastRowKey);
 
     if (numOfRows >= maxRowsToRead) {
       moveToNextRowInMem(pCheckInfo);

@@ -2767,7 +2777,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
   } while (moveToNextRowInMem(pCheckInfo));
 
-  taosMemoryFreeClear(pSchema); // free the STSChema
+  taosMemoryFreeClear(pSchema);  // free the STSChema
 
   assert(numOfRows <= maxRowsToRead);

@@ -2895,8 +2905,8 @@ static bool loadCachedLastRow(STsdbReadHandle* pTsdbReadHandle) {
   // if (ret != TSDB_CODE_SUCCESS) {
   //   return false;
   // }
-  mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, pCheckInfo->tableId,
-                     NULL, NULL, true, &lastRowKey);
+  mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols,
+                     pCheckInfo->tableId, NULL, NULL, true, &lastRowKey);
   taosMemoryFreeClear(pRow);
 
   // update the last key value

@@ -3465,7 +3475,7 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa
   pDataBlockInfo->rows = cur->rows;
   pDataBlockInfo->window = cur->win;
-  // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle));
+  //   ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle));
 }
 
 /*

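Note: the lastKeyAppend bookkeeping above exists because rows from the in-memory buffer and the file block are merged by timestamp; a greatly simplified analogue of such a two-way merge (assumption: on an equal key the second source wins, roughly mirroring update mode in the real reader):

    #include <stdint.h>
    #include <stdio.h>

    /* Merge two timestamp-sorted row sources; on an equal key the second
     * source (imem) replaces the first (mem). Illustration only. */
    static void mergeSorted(const int64_t *mem, int nMem, const int64_t *imem, int nImem) {
      int i = 0, j = 0;
      while (i < nMem || j < nImem) {
        if (j >= nImem || (i < nMem && mem[i] < imem[j])) {
          printf("take mem %lld\n", (long long)mem[i++]);
        } else if (i >= nMem || imem[j] < mem[i]) {
          printf("take imem %lld\n", (long long)imem[j]);
          j++;
        } else {  /* equal timestamp: imem row overrides the mem row */
          printf("take imem %lld (overrides mem)\n", (long long)imem[j]);
          i++;
          j++;
        }
      }
    }

    int main(void) {
      int64_t mem[] = {1, 3, 5};
      int64_t imem[] = {3, 4};
      mergeSorted(mem, 3, imem, 2);
      return 0;
    }
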
@@ -97,7 +97,6 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
   if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
   if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
 
-  // sync integration
   if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
   if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
   SJson *pNodeInfoArr = tjsonCreateArray();

@@ -157,7 +156,6 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
   if (tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
   if (tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
 
-  // sync integration
   if (tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
   if (tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;

@@ -124,8 +124,7 @@ _exit:
 
 int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
   pLoad->vgId = TD_VID(pVnode);
-  // pLoad->syncState = TAOS_SYNC_STATE_LEADER;
-  pLoad->syncState = syncGetMyRole(pVnode->sync);  // sync integration
+  pLoad->syncState = syncGetMyRole(pVnode->sync);
   pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
   pLoad->numOfTimeSeries = 400;
   pLoad->totalStorage = 300;

@@ -198,7 +198,6 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
   tsdbInsertTSmaData(((SVnode *)pVnode)->pTsdb, smaId, (const char *)data);
 }
 
-// sync integration
 int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
   if (syncEnvIsStart()) {
     SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync);

@@ -14,12 +14,6 @@
 */
 
 #include "vnd.h"
-// #include "sync.h"
-// #include "syncTools.h"
-// #include "tmsgcb.h"
-// #include "vnodeInt.h"
-
-// sync integration
 
 int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
   SSyncInfo syncInfo;

@@ -36,7 +36,7 @@ target_link_libraries(
     PRIVATE os util common nodes function
 )
 
-add_library(udf1 MODULE test/udf1.c)
+add_library(udf1 STATIC MODULE test/udf1.c)
 target_include_directories(
     udf1
     PUBLIC

@@ -50,7 +50,7 @@ target_include_directories(
 target_link_libraries(
     udf1 PUBLIC os)
 
-add_library(udf2 MODULE test/udf2.c)
+add_library(udf2 STATIC MODULE test/udf2.c)
 target_include_directories(
     udf2
     PUBLIC

@@ -127,7 +127,7 @@ enum {
 
 int64_t gUdfTaskSeqNum = 0;
 typedef struct SUdfdProxy {
-  char udfdPipeName[UDF_LISTEN_PIPE_NAME_LEN];
+  char udfdPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
   uv_barrier_t gUdfInitBarrier;
 
   uv_loop_t gUdfdLoop;

@@ -224,9 +224,15 @@ int32_t getUdfdPipeName(char* pipeName, int32_t size) {
   size_t dnodeIdSize = sizeof(dnodeId);
   int32_t err = uv_os_getenv(UDF_DNODE_ID_ENV_NAME, dnodeId, &dnodeIdSize);
   if (err != 0) {
     fnError("get dnode id from env. error: %s.", uv_err_name(err));
     dnodeId[0] = '1';
   }
+#ifdef _WIN32
   snprintf(pipeName, size, "%s%s", UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId);
+#else
+  snprintf(pipeName, size, "%s/%s%s", tsDataDir, UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId);
+#endif
   fnInfo("get dnode id from env. dnode id: %s. pipe path: %s", dnodeId, pipeName);
   return 0;
 }

@@ -998,7 +1004,7 @@ int32_t udfcOpen() {
     return 0;
   }
   SUdfdProxy *proxy = &gUdfdProxy;
-  getUdfdPipeName(proxy->udfdPipeName, UDF_LISTEN_PIPE_NAME_LEN);
+  getUdfdPipeName(proxy->udfdPipeName, sizeof(proxy->udfdPipeName));
   proxy->gUdfcState = UDFC_STATE_STARTNG;
   uv_barrier_init(&proxy->gUdfInitBarrier, 2);
   uv_thread_create(&proxy->gUdfLoopThread, constructUdfService, proxy);

@@ -1253,7 +1259,9 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
     return false;
   }
   UdfcFuncHandle handle;
-  if (setupUdf((char*)pCtx->udfName, &handle) != 0) {
+  int32_t udfCode = 0;
+  if ((udfCode = setupUdf((char*)pCtx->udfName, &handle)) != 0) {
+    fnError("udfAggInit error. step setupUdf. udf code: %d", udfCode);
     return false;
   }
   SClientUdfUvSession *session = (SClientUdfUvSession *)handle;

@@ -1266,7 +1274,8 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult
 
   udfRes->session = (SClientUdfUvSession *)handle;
   SUdfInterBuf buf = {0};
-  if (callUdfAggInit(handle, &buf) != 0) {
+  if ((udfCode = callUdfAggInit(handle, &buf)) != 0) {
+    fnError("udfAggInit error. step callUdfAggInit. udf code: %d", udfCode);
     return false;
   }
   udfRes->interResNum = buf.numOfResult;

@@ -1310,21 +1319,23 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
                         .numOfResult = udfRes->interResNum};
   SUdfInterBuf newState = {0};
 
-  callUdfAggProcess(session, inputBlock, &state, &newState);
-
-  udfRes->interResNum = newState.numOfResult;
-  memcpy(udfRes->interResBuf, newState.buf, newState.bufLen);
-
+  int32_t udfCode = callUdfAggProcess(session, inputBlock, &state, &newState);
+  if (udfCode != 0) {
+    fnError("udfAggProcess error. code: %d", udfCode);
+    newState.numOfResult = 0;
+  } else {
+    udfRes->interResNum = newState.numOfResult;
+    memcpy(udfRes->interResBuf, newState.buf, newState.bufLen);
+  }
   if (newState.numOfResult == 1 || state.numOfResult == 1) {
     GET_RES_INFO(pCtx)->numOfRes = 1;
   }
 
   blockDataDestroy(inputBlock);
   taosArrayDestroy(tempBlock.pDataBlock);
 
   taosMemoryFree(newState.buf);
-  return 0;
+  return TSDB_CODE_SUCCESS;
 }

@@ -1338,15 +1349,22 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) {
   SUdfInterBuf state = {.buf = udfRes->interResBuf,
                         .bufLen = session->bufSize,
                         .numOfResult = udfRes->interResNum};
-  callUdfAggFinalize(session, &state, &resultBuf);
-
-  udfRes->finalResBuf = resultBuf.buf;
-  udfRes->finalResNum = resultBuf.numOfResult;
-
-  teardownUdf(session);
-
-  if (resultBuf.numOfResult == 1) {
-    GET_RES_INFO(pCtx)->numOfRes = 1;
+  int32_t udfCallCode = 0;
+  udfCallCode = callUdfAggFinalize(session, &state, &resultBuf);
+  if (udfCallCode != 0) {
+    fnError("udfAggFinalize error. callUdfAggFinalize step. udf code:%d", udfCallCode);
+    GET_RES_INFO(pCtx)->numOfRes = 0;
+  } else {
+    memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen);
+    udfRes->finalResNum = resultBuf.numOfResult;
+    GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum;
+  }
+
+  int32_t code = teardownUdf(session);
+  if (code != 0) {
+    fnError("udfAggFinalize error. teardownUdf step. udf code: %d", code);
   }
 
   return functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
 }

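Note: the UDF hunks replace fire-and-forget calls with checked ones that name the failing step in the log; a minimal sketch of the pattern (hypothetical step functions, not the real UDF API):

    #include <stdio.h>

    /* Hypothetical fallible steps, for illustration only. */
    static int stepSetup(void) { return 0; }
    static int stepCall(void)  { return -1; }

    /* Check each step's return code and name the failing step in the log,
     * instead of assuming the call succeeded. */
    static int runPipeline(void) {
      int code = stepSetup();
      if (code != 0) {
        fprintf(stderr, "pipeline error. step setup. code: %d\n", code);
        return code;
      }
      code = stepCall();
      if (code != 0) {
        fprintf(stderr, "pipeline error. step call. code: %d\n", code);
        return code;
      }
      return 0;
    }

    int main(void) { return runPipeline(); }
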
@@ -30,7 +30,7 @@ typedef struct SUdfdContext {
   uv_loop_t *loop;
   uv_pipe_t ctrlPipe;
   uv_signal_t intrSignal;
-  char listenPipeName[UDF_LISTEN_PIPE_NAME_LEN];
+  char listenPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2];
   uv_pipe_t listeningPipe;
 
   void *clientRpc;

@@ -652,7 +652,7 @@ static int32_t udfdUvInit() {
   uv_pipe_open(&global.ctrlPipe, 0);
   uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb);
 
-  getUdfdPipeName(global.listenPipeName, UDF_LISTEN_PIPE_NAME_LEN);
+  getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName));
 
   removeListeningPipe();

@@ -696,6 +696,7 @@ static int32_t udfdRun() {
   fnInfo("udfd stopped. result: %s, code: %d", uv_err_name(code), code);
   int codeClose = uv_loop_close(global.loop);
   fnDebug("uv loop close. result: %s", uv_err_name(codeClose));
+  removeListeningPipe();
   udfdCloseClientRpc();
   uv_mutex_destroy(&global.udfsMutex);
   taosHashCleanup(global.udfsHash);

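Note: both call sites now pass sizeof(buffer) instead of the UDF_LISTEN_PIPE_NAME_LEN constant, so the bound can never drift from the buffer's declaration; a tiny sketch of the idiom (hypothetical names):

    #include <stdio.h>
    #include <limits.h>  /* PATH_MAX on most systems; assumption for this sketch */

    #ifndef PATH_MAX
    #define PATH_MAX 4096
    #endif
    #define PIPE_NAME_LEN 32

    /* Buffer sized for a directory prefix plus the pipe name; callers pass
     * sizeof(buf) so the bound always matches the declaration. */
    static char gPipeName[PATH_MAX + PIPE_NAME_LEN + 2];

    static void buildPipeName(char *out, size_t size) {
      snprintf(out, size, "%s/%s", "/tmp/data", "udfd.sock");
    }

    int main(void) {
      buildPipeName(gPipeName, sizeof(gPipeName));
      printf("%s\n", gPipeName);
      return 0;
    }
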
@@ -347,10 +347,21 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *output
    if (fmIsUserDefinedFunc(node->funcId)) {
      UdfcFuncHandle udfHandle = NULL;

-     SCL_ERR_JRET(setupUdf(node->functionName, &udfHandle));
+     code = setupUdf(node->functionName, &udfHandle);
+     if (code != 0) {
+       sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", node->functionName, code);
+       goto _return;
+     }
      code = callUdfScalarFunc(udfHandle, params, paramNum, output);
-     teardownUdf(udfHandle);
-     SCL_ERR_JRET(code);
+     if (code != 0) {
+       sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
+       goto _return;
+     }
+     code = teardownUdf(udfHandle);
+     if (code != 0) {
+       sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
+       goto _return;
+     }
    } else {
      SScalarFuncExecFuncs ffpSet = {0};
      code = fmGetScalarFuncExecFuncs(node->funcId, &ffpSet);
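The macro-based early return is replaced here by explicit checks so each step (setup, call, teardown) logs its own failure before jumping to the shared cleanup label. A minimal sketch of that setup/call/teardown shape under goto-based cleanup, with hypothetical udf_setup/udf_call/udf_teardown helpers; note this sketch releases the handle even when the call fails, which the error path in the hunk above does not do:

  #include <stdint.h>
  #include <stdio.h>

  typedef void *Handle;
  static int32_t udf_setup(Handle *h)   { *h = (void *)1; return 0; }  /* hypothetical */
  static int32_t udf_call(Handle h)     { (void)h; return 0; }         /* hypothetical */
  static int32_t udf_teardown(Handle h) { (void)h; return 0; }         /* hypothetical */

  int32_t exec_udf(void) {
    int32_t code = 0;
    Handle  h = NULL;

    code = udf_setup(&h);
    if (code != 0) {
      fprintf(stderr, "setup failed: %d\n", code);
      goto _return;                 /* nothing to tear down yet */
    }
    code = udf_call(h);
    if (code != 0) {
      fprintf(stderr, "call failed: %d\n", code);
      (void)udf_teardown(h);        /* still release the handle on failure */
      goto _return;
    }
    code = udf_teardown(h);
    if (code != 0) {
      fprintf(stderr, "teardown failed: %d\n", code);
    }
  _return:
    return code;
  }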
@@ -67,6 +67,7 @@ typedef struct SSrvMsg {

  typedef struct SWorkThrdObj {
    TdThread     thread;
+   uv_connect_t connect_req;
    uv_pipe_t*   pipe;
    uv_os_fd_t   fd;
    uv_loop_t*   loop;

@@ -87,8 +88,10 @@ typedef struct SServerObj {
    // work thread info
    int workerIdx;
    int numOfThreads;
+   int numOfWorkerReady;
    SWorkThrdObj** pThreadObj;

+   uv_pipe_t   pipeListen;
    uv_pipe_t** pipe;
    uint32_t ip;
    uint32_t port;
@@ -161,7 +164,7 @@ static void* transWorkerThread(void* arg);
  static void* transAcceptThread(void* arg);

  // add handle loop
- static bool addHandleToWorkloop(void* arg);
+ static bool addHandleToWorkloop(SWorkThrdObj* pThrd, char* pipeName);
  static bool addHandleToAcceptloop(void* arg);

  #define CONN_SHOULD_RELEASE(conn, head) \
@@ -577,6 +580,12 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
    uv_tcp_init(pObj->loop, cli);

    if (uv_accept(stream, (uv_stream_t*)cli) == 0) {
+     if (pObj->numOfWorkerReady < pObj->numOfThreads) {
+       tError("worker-threads are not ready for all, need %d instead of %d.", pObj->numOfThreads, pObj->numOfWorkerReady);
+       uv_close((uv_handle_t*)cli, NULL);
+       return;
+     }
+
      uv_write_t* wr = (uv_write_t*)taosMemoryMalloc(sizeof(uv_write_t));
      wr->data = cli;
      uv_buf_t buf = uv_buf_init((char*)notify, strlen(notify));
@@ -672,15 +681,21 @@ void* transAcceptThread(void* arg) {

    return NULL;
  }
- static bool addHandleToWorkloop(void* arg) {
-   SWorkThrdObj* pThrd = arg;
+ void uvOnPipeConnectionCb(uv_connect_t *connect, int status) {
+   if (status != 0) {
+     return;
+   }
+   SWorkThrdObj* pThrd = container_of(connect, SWorkThrdObj, connect_req);
+   uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
+ }
+ static bool addHandleToWorkloop(SWorkThrdObj* pThrd, char* pipeName) {
    pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
    if (0 != uv_loop_init(pThrd->loop)) {
      return false;
    }

    uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
-   uv_pipe_open(pThrd->pipe, pThrd->fd);
+   // int r = uv_pipe_open(pThrd->pipe, pThrd->fd);

    pThrd->pipe->data = pThrd;

@@ -691,7 +706,8 @@ static bool addHandleToWorkloop(void* arg) {
    QUEUE_INIT(&pThrd->conn);

    pThrd->asyncPool = transCreateAsyncPool(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
-   uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
+   uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
+   // uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
    return true;
  }
@@ -802,12 +818,32 @@ static void uvDestroyConn(uv_handle_t* handle) {
      uv_walk(thrd->loop, uvWalkCb, NULL);
    }
  }
+ static void uvPipeListenCb(uv_stream_t* handle, int status) {
+   ASSERT(status == 0);
+
+   SServerObj* srv = container_of(handle, SServerObj, pipeListen);
+   uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
+   ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
+   ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
+
+   ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
+   ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
+   ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
+
+   srv->numOfWorkerReady++;
+
+   // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
+
+   // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
+   // ASSERT(r == 0);
+ }

  void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
    SServerObj* srv = taosMemoryCalloc(1, sizeof(SServerObj));
    srv->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
    srv->numOfThreads = numOfThreads;
    srv->workerIdx = 0;
+   srv->numOfWorkerReady = 0;
    srv->pThreadObj = (SWorkThrdObj**)taosMemoryCalloc(srv->numOfThreads, sizeof(SWorkThrdObj*));
    srv->pipe = (uv_pipe_t**)taosMemoryCalloc(srv->numOfThreads, sizeof(uv_pipe_t*));
    srv->ip = ip;
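uvPipeListenCb recovers the owning SServerObj from the embedded pipeListen handle via container_of rather than stashing a back-pointer in handle->data. A minimal, self-contained sketch of that pattern, using a kernel-style container_of definition and hypothetical Server/Handle types:

  #include <stddef.h>
  #include <stdio.h>

  /* local container_of, matching the common kernel-style definition */
  #define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

  typedef struct Handle { int dummy; } Handle;

  typedef struct Server {
    int    id;
    Handle pipeListen;   /* embedded handle, as in SServerObj */
  } Server;

  static void on_event(Handle *h) {
    /* recover the owning Server from the embedded member */
    Server *srv = container_of(h, Server, pipeListen);
    printf("server id: %d\n", srv->id);
  }

  int main(void) {
    Server s = {.id = 42};
    on_event(&s.pipeListen);
    return 0;
  }

This works because the member's offset inside the struct is a compile-time constant, so no extra pointer needs to be threaded through the callback.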
@@ -817,6 +853,16 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
    taosThreadOnce(&transModuleInit, uvInitEnv);
    transSrvInst++;

+   char pipeName[64];
+   assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
+ #ifdef WINDOWS
+   snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc\\%p-%lu", taosSafeRand(), GetCurrentProcessId());
+ #else
+   snprintf(pipeName, sizeof(pipeName), ".trans.rpc\\%08X-%lu", taosSafeRand(), taosGetSelfPthreadId());
+ #endif
+   assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
+   assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));

    for (int i = 0; i < srv->numOfThreads; i++) {
      SWorkThrdObj* thrd = (SWorkThrdObj*)taosMemoryCalloc(1, sizeof(SWorkThrdObj));
      thrd->pTransInst = shandle;
@@ -826,17 +872,22 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,

      srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t));

-     uv_os_sock_t fds[2];
-     if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
-       goto End;
-     }
-     uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
-     uv_pipe_open(&(srv->pipe[i][0]), fds[1]);  // init write
+     // #ifdef WINDOWS
+     //   uv_file fds[2];
+     //   if (uv_pipe(fds, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE, UV_READABLE_PIPE|UV_WRITABLE_PIPE|UV_NONBLOCK_PIPE) != 0) {
+     // #else
+     //   uv_os_sock_t fds[2];
+     //   if (uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
+     // #endif
+     //   goto End;
+     //   }
+     //   uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
+     //   uv_pipe_open(&(srv->pipe[i][0]), fds[1]);  // init write

-     thrd->fd = fds[0];
+     // thrd->fd = fds[0];
      thrd->pipe = &(srv->pipe[i][1]);  // init read

-     if (false == addHandleToWorkloop(thrd)) {
+     if (false == addHandleToWorkloop(thrd, pipeName)) {
        goto End;
      }
      int err = taosThreadCreate(&(thrd->thread), NULL, transWorkerThread, (void*)(thrd));
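Taken together, these transport hunks replace the per-worker socketpair with a single named pipe: the accept thread binds and listens on the pipe, each worker thread uv_pipe_connects back to the same name from its own loop, and accepted TCP clients can then be handed to workers over these IPC pipes. A minimal, self-contained sketch of the listen/connect handshake (POSIX-only, single process and loop for illustration; the pipe path is hypothetical; build with: gcc pipe_handoff.c -luv):

  #include <stdio.h>
  #include <unistd.h>
  #include <uv.h>

  #define PIPE_NAME "/tmp/demo.handoff.sock"   /* hypothetical pipe path */

  static uv_pipe_t    listener, server_end, client_end;
  static uv_connect_t connect_req;

  static void on_pipe_connection(uv_stream_t *srv, int status) {
    if (status != 0) return;
    /* ipc=1: TCP handles could later be passed over this pipe via uv_write2 */
    uv_pipe_init(srv->loop, &server_end, 1);
    if (uv_accept(srv, (uv_stream_t *)&server_end) == 0) {
      printf("accept thread: worker pipe accepted\n");
    }
    uv_close((uv_handle_t *)&server_end, NULL);  /* demo only: close immediately */
    uv_close((uv_handle_t *)&listener, NULL);    /* so uv_run can return */
  }

  static void on_connected(uv_connect_t *req, int status) {
    (void)req;
    printf("worker: connected, status=%d\n", status);
    uv_close((uv_handle_t *)&client_end, NULL);
  }

  int main(void) {
    uv_loop_t *loop = uv_default_loop();

    unlink(PIPE_NAME);                           /* drop a stale socket file */

    /* accept-thread side: bind + listen on the named pipe */
    uv_pipe_init(loop, &listener, 0);
    uv_pipe_bind(&listener, PIPE_NAME);
    uv_listen((uv_stream_t *)&listener, 128, on_pipe_connection);

    /* worker side: connect back to the same name */
    uv_pipe_init(loop, &client_end, 1);
    uv_pipe_connect(&connect_req, &client_end, PIPE_NAME, on_connected);

    return uv_run(loop, UV_RUN_DEFAULT);
  }

One plausible reason for this design over socketpair is that all pipe handles end up initialized on the loop that will actually use them, which matters because libuv handles are not safe to share across loops.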
@@ -204,7 +204,7 @@ int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {

  int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
    char tmp[PATH_MAX] = {0};
  #ifdef WINDOWS
-   if (_fullpath(dirname, tmp, maxlen) != NULL) {
+   if (_fullpath(tmp, dirname, maxlen) != NULL) {
  #else
    if (realpath(dirname, tmp) != NULL) {
  #endif
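The fix above is an argument-order bug: _fullpath(absPath, relPath, maxLength) takes the destination first, the opposite of realpath(path, resolved_path). A minimal sketch of a portable wrapper that keeps the two conventions straight (the resolve_path name is hypothetical):

  #include <stdio.h>
  #include <stdlib.h>
  #include <limits.h>

  /* resolve 'rel' into 'abs'; returns 0 on success, -1 on failure */
  static int resolve_path(const char *rel, char *abs, size_t absLen) {
  #ifdef _WIN32
    /* _fullpath(absPath, relPath, maxLength): destination comes FIRST */
    return _fullpath(abs, rel, absLen) != NULL ? 0 : -1;
  #else
    (void)absLen;  /* realpath assumes a PATH_MAX-sized destination */
    /* realpath(path, resolved_path): destination comes SECOND */
    return realpath(rel, abs) != NULL ? 0 : -1;
  #endif
  }

  int main(void) {
    char abs[PATH_MAX];
    if (resolve_path(".", abs, sizeof(abs)) == 0) printf("%s\n", abs);
    return 0;
  }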
@@ -543,7 +543,7 @@ int32_t taosFsyncFile(TdFilePtr pFile) {

    HANDLE h = (HANDLE)_get_osfhandle(pFile->fd);

-   return FlushFileBuffers(h);
+   return !FlushFileBuffers(h);
  #else
    if (pFile == NULL) {
      return 0;
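The negation above reconciles two return conventions: FlushFileBuffers is a Win32 BOOL API that returns nonzero on success and 0 on failure, while this wrapper follows fsync(2)'s 0-on-success convention. A Win32-only sketch of the mapping:

  #include <windows.h>

  /* FlushFileBuffers: nonzero == success, 0 == failure (Win32 BOOL) */
  int fsync_win32(HANDLE h) {
    /* negate so success maps to 0, matching fsync(2)'s convention */
    return !FlushFileBuffers(h);
  }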
@@ -869,11 +869,15 @@ SysNameInfo taosGetSysNameInfo() {
    SysNameInfo info = {0};
    DWORD dwVersion = GetVersion();

-   tstrncpy(info.sysname, getenv("OS"), sizeof(info.sysname));
-   tstrncpy(info.nodename, getenv("COMPUTERNAME"), sizeof(info.nodename));
+   char *tmp = NULL;
+   tmp = getenv("OS");
+   if (tmp != NULL) tstrncpy(info.sysname, tmp, sizeof(info.sysname));
+   tmp = getenv("COMPUTERNAME");
+   if (tmp != NULL) tstrncpy(info.nodename, tmp, sizeof(info.nodename));
    sprintf_s(info.release, sizeof(info.release), "%d", dwVersion & 0x0F);
    sprintf_s(info.version, sizeof(info.release), "%d", (dwVersion >> 8) & 0x0F);
-   tstrncpy(info.machine, getenv("PROCESSOR_ARCHITECTURE"), sizeof(info.machine));
+   tmp = getenv("PROCESSOR_ARCHITECTURE");
+   if (tmp != NULL) tstrncpy(info.machine, tmp, sizeof(info.machine));

    return info;
  #elif defined(_TD_DARWIN_64)
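getenv returns NULL when a variable is unset, and passing NULL into a string copy is undefined behavior, which is what the added guards prevent. A minimal sketch of the same guard factored into a helper (getenv_into is a hypothetical name, not a TDengine API):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* copy an environment variable into dst, or leave dst untouched when unset */
  static void getenv_into(const char *name, char *dst, size_t dstSize) {
    const char *val = getenv(name);          /* may be NULL */
    if (val != NULL) {
      strncpy(dst, val, dstSize - 1);
      dst[dstSize - 1] = '\0';               /* strncpy does not always terminate */
    }
  }

  int main(void) {
    char os[64] = "unknown";
    getenv_into("OS", os, sizeof(os));       /* unset on most non-Windows systems */
    printf("OS=%s\n", os);
    return 0;
  }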
@@ -62,6 +62,8 @@
  # ---- tstream
  ./test.sh -f tsim/tstream/basic0.sim

+ # ---- transaction
+ ./test.sh -f tsim/trans/create_db.sim

  # ---- tmq
  ./test.sh -f tsim/tmq/basic1.sim
@@ -0,0 +1,166 @@ (new file; the test list above adds tsim/trans/create_db.sim)
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect

print =============== show dnodes
sql show dnodes;
if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

sql show mnodes;
if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

if $data02 != LEADER then
  return -1
endi

print =============== create dnodes
sql create dnode $hostname port 7200
sleep 2000

sql show dnodes;
if $rows != 2 then
  return -1
endi

if $data00 != 1 then
  return -1
endi

if $data10 != 2 then
  return -1
endi

print =============== kill dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT

print =============== create database
sql show transactions
if $rows != 0 then
  return -1
endi

sql_error create database d1 vgroups 2;

print =============== show transactions
sql show transactions
if $rows != 1 then
  return -1
endi

if $data[0][0] != 2 then
  return -1
endi

if $data[0][2] != undoAction then
  return -1
endi

if $data[0][3] != d1 then
  return -1
endi

if $data[0][4] != create-db then
  return -1
endi

if $data[0][7] != @Unable to establish connection@ then
  return -1
endi

sql_error create database d1 vgroups 2;

print =============== start dnode2
system sh/exec.sh -n dnode2 -s start
sleep 3000

sql show transactions
if $rows != 0 then
  return -1
endi

sql create database d1 vgroups 2;

print =============== kill dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT

print =============== create database
sql show transactions
if $rows != 0 then
  return -1
endi

sql_error create database d2 vgroups 2;

print =============== show transactions
sql show transactions
if $rows != 1 then
  return -1
endi

if $data[0][0] != 4 then
  return -1
endi

if $data[0][2] != undoAction then
  return -1
endi

if $data[0][3] != d2 then
  return -1
endi

if $data[0][4] != create-db then
  return -1
endi

if $data[0][7] != @Unable to establish connection@ then
  return -1
endi

sql_error create database d2 vgroups 2;

print =============== kill transaction
sql kill transaction 4;
sleep 2000

sql show transactions
if $rows != 0 then
  return -1
endi

print =============== start dnode2
system sh/exec.sh -n dnode2 -s start
sleep 3000

sql show transactions
if $rows != 0 then
  return -1
endi

sql create database d2 vgroups 2;
sql_error kill transaction 1;
sql_error kill transaction 2;
sql_error kill transaction 3;
sql_error kill transaction 4;
sql_error kill transaction 5;

return
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
@@ -0,0 +1,311 @@
import taos
import sys
import time
import socket
import pexpect
import os
import http.server
import gzip
import threading
import json

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

telemetryPort = '6043'


def telemetryInfoCheck(infoDict=''):

    hostname = socket.gethostname()
    serverPort = 7080

    if "ts" not in infoDict or len(infoDict["ts"]) == 0:
        tdLog.exit("ts is null!")

    if "dnode_id" not in infoDict or infoDict["dnode_id"] != 1:
        tdLog.exit("dnode_id is null!")

    if "dnode_ep" not in infoDict:
        tdLog.exit("dnode_ep is null!")

    if "cluster_id" not in infoDict:
        tdLog.exit("cluster_id is null!")

    if "protocol" not in infoDict or infoDict["protocol"] != 1:
        tdLog.exit("protocol is null!")

    if "cluster_info" not in infoDict:
        tdLog.exit("cluster_info is null!")

    # cluster_info ====================================

    if "first_ep" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep"] == None:
        tdLog.exit("first_ep is null!")

    if "first_ep_dnode_id" not in infoDict["cluster_info"] or infoDict["cluster_info"]["first_ep_dnode_id"] != 1:
        tdLog.exit("first_ep_dnode_id is null!")

    if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None:
        tdLog.exit("version is null!")

    if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None:
        tdLog.exit("master_uptime is null!")

    if "monitor_interval" not in infoDict["cluster_info"] or infoDict["cluster_info"]["monitor_interval"] != 5:
        tdLog.exit("monitor_interval is null!")

    if "vgroups_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_total"] < 0:
        tdLog.exit("vgroups_total is null!")

    if "vgroups_alive" not in infoDict["cluster_info"] or infoDict["cluster_info"]["vgroups_alive"] < 0:
        tdLog.exit("vgroups_alive is null!")

    if "connections_total" not in infoDict["cluster_info"] or infoDict["cluster_info"]["connections_total"] < 0:
        tdLog.exit("connections_total is null!")

    if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None:
        tdLog.exit("dnodes is null!")

    dnodes_info = {"dnode_id": 1, "dnode_ep": f"{hostname}:{serverPort}", "status": "ready"}

    for k, v in dnodes_info.items():
        if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k]:
            tdLog.exit("dnodes info is null!")

    mnodes_info = {"mnode_id": 1, "mnode_ep": f"{hostname}:{serverPort}", "role": "LEADER"}

    for k, v in mnodes_info.items():
        if k not in infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k]:
            tdLog.exit("mnodes info is null!")

    # vgroup_infos ====================================

    if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"] == None:
        tdLog.exit("vgroup_infos is null!")

    vgroup_infos_nums = len(infoDict["vgroup_infos"])

    for index in range(vgroup_infos_nums):
        if "vgroup_id" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vgroup_id"] < 0:
            tdLog.exit("vgroup_id is null!")
        if "database_name" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["database_name"]) < 0:
            tdLog.exit("database_name is null!")
        if "tables_num" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["tables_num"] != 0:
            tdLog.exit("tables_num is null!")
        if "status" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["status"]) < 0:
            tdLog.exit("status is null!")
        if "vnodes" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vnodes"] == None:
            tdLog.exit("vnodes is null!")
        if "dnode_id" not in infoDict["vgroup_infos"][index]["vnodes"][0] or infoDict["vgroup_infos"][index]["vnodes"][0]["dnode_id"] < 0:
            tdLog.exit("vnodes is null!")

    # grant_info ====================================

    if "grant_info" not in infoDict or infoDict["grant_info"] == None:
        tdLog.exit("grant_info is null!")

    if "expire_time" not in infoDict["grant_info"] or not infoDict["grant_info"]["expire_time"] > 0:
        tdLog.exit("expire_time is null!")

    if "timeseries_used" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_used"] > 0:
        tdLog.exit("timeseries_used is null!")

    if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0:
        tdLog.exit("timeseries_total is null!")

    # dnode_info ====================================

    if "dnode_info" not in infoDict or infoDict["dnode_info"] == None:
        tdLog.exit("dnode_info is null!")

    dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
                   'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
                   'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success',
                   'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode']
    for elem in dnode_infos:
        if elem not in infoDict["dnode_info"] or infoDict["dnode_info"][elem] < 0:
            tdLog.exit(f"{elem} is null!")

    # disk_infos ====================================

    if "disk_infos" not in infoDict or infoDict["disk_infos"] == None:
        tdLog.exit("disk_infos is null!")

    # bug for data_dir
    if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <= 0:
        tdLog.exit("datadir is null!")

    if "name" not in infoDict["disk_infos"]["datadir"][0] or len(infoDict["disk_infos"]["datadir"][0]["name"]) <= 0:
        tdLog.exit("name is null!")

    if "level" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["level"] < 0:
        tdLog.exit("level is null!")

    if "avail" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["avail"] <= 0:
        tdLog.exit("avail is null!")

    if "used" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["used"] <= 0:
        tdLog.exit("used is null!")

    if "total" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["total"] <= 0:
        tdLog.exit("total is null!")

    if "logdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["logdir"] == None:
        tdLog.exit("logdir is null!")

    if "name" not in infoDict["disk_infos"]["logdir"] or len(infoDict["disk_infos"]["logdir"]["name"]) <= 0:
        tdLog.exit("name is null!")

    if "avail" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["avail"] <= 0:
        tdLog.exit("avail is null!")

    if "used" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["used"] <= 0:
        tdLog.exit("used is null!")

    if "total" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["total"] <= 0:
        tdLog.exit("total is null!")

    if "tempdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["tempdir"] == None:
        tdLog.exit("tempdir is null!")

    if "name" not in infoDict["disk_infos"]["tempdir"] or len(infoDict["disk_infos"]["tempdir"]["name"]) <= 0:
        tdLog.exit("name is null!")

    if "avail" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["avail"] <= 0:
        tdLog.exit("avail is null!")

    if "used" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["used"] <= 0:
        tdLog.exit("used is null!")

    if "total" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["total"] <= 0:
        tdLog.exit("total is null!")

    # log_infos ====================================

    if "log_infos" not in infoDict or infoDict["log_infos"] == None:
        tdLog.exit("log_infos is null!")

    if "logs" not in infoDict["log_infos"] or len(infoDict["log_infos"]["logs"]) != 10:
        tdLog.exit("logs is null!")

    if "ts" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 10:
        tdLog.exit("ts is null!")

    if "level" not in infoDict["log_infos"]["logs"][0] or infoDict["log_infos"]["logs"][0]["level"] not in ["error", "info", "debug", "trace"]:
        tdLog.exit("level is null!")

    if "content" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 1:
        tdLog.exit("content is null!")

    if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"]) != 4:
        tdLog.exit("summary is null!")

    if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0:
        tdLog.exit("total is null!")

    if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error", "info", "debug", "trace"]:
        tdLog.exit("level is null!")


class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        """
        process GET request
        """

    def do_POST(self):
        """
        process POST request
        """
        contentEncoding = self.headers["Content-Encoding"]

        if contentEncoding == 'gzip':
            req_body = self.rfile.read(int(self.headers["Content-Length"]))
            plainText = gzip.decompress(req_body).decode()
        else:
            plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()

        print(plainText)
        # 1. send response code and header
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()

        # 2. send response content
        # self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))

        # 3. check request body info
        infoDict = json.loads(plainText)
        # print("================")
        # print(infoDict)
        telemetryInfoCheck(infoDict)

        # 4. shutdown the server and exit case
        assassin = threading.Thread(target=httpServer.shutdown)
        assassin.daemon = True
        assassin.start()
        print("==== shutdown http server ====")


class TDTestCase:
    hostname = socket.gethostname()
    serverPort = '7080'
    rpcDebugFlagVal = '143'
    clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp': '', 'rpcDebugFlag': '135', 'fqdn': ''}
    clientCfgDict["serverPort"] = serverPort
    clientCfgDict["firstEp"] = hostname + ':' + serverPort
    clientCfgDict["secondEp"] = hostname + ':' + serverPort
    clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    clientCfgDict["fqdn"] = hostname

    updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp': '', 'rpcDebugFlag': '135', 'fqdn': ''}
    updatecfgDict["clientCfg"] = clientCfgDict
    updatecfgDict["serverPort"] = serverPort
    updatecfgDict["firstEp"] = hostname + ':' + serverPort
    updatecfgDict["secondEp"] = hostname + ':' + serverPort
    updatecfgDict["fqdn"] = hostname

    updatecfgDict["monitorFqdn"] = hostname
    updatecfgDict["monitorPort"] = '6043'
    updatecfgDict["monitor"] = '1'
    updatecfgDict["monitorInterval"] = "5"
    updatecfgDict["monitorMaxLogs"] = "10"
    updatecfgDict["monitorComp"] = "1"

    print("===================: ", updatecfgDict)

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()
        # time.sleep(2)
        vgroups = "30"
        sql = "create database db3 vgroups " + vgroups
        tdSql.query(sql)

        # loop to wait request
        httpServer.serve_forever()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


# create http server: bind ip/port, and request processor
serverAddress = ("", int(telemetryPort))
httpServer = http.server.HTTPServer(serverAddress, RequestHandlerImpl)

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -12,6 +12,7 @@
  # -*- coding: utf-8 -*-

  import sys
  import os
  import threading
+ import multiprocessing as mp
  from numpy.lib.function_base import insert
@@ -66,14 +67,19 @@ class TDTestCase:
      # run case
      def run(self):

-         # test base case
-         self.test_case1()
-         tdLog.debug(" LIMIT test_case1 ............ [OK]")
+         # # test base case
+         # self.test_case1()
+         # tdLog.debug(" LIMIT test_case1 ............ [OK]")

-         # test advance case
+         # test case
          # self.test_case2()
          # tdLog.debug(" LIMIT test_case2 ............ [OK]")

+         # test case
+         self.test_case3()
+         tdLog.debug(" LIMIT test_case3 ............ [OK]")

      # stop
      def stop(self):
          tdSql.close()
@@ -115,11 +121,12 @@ class TDTestCase:
          return cur

      def new_create_tables(self, dbname, vgroups, stbname, tcountStart, tcountStop):
-         host = "chenhaoran02"
+         host = "localhost"
          buildPath = self.getBuildPath()
          config = buildPath + "../sim/dnode1/cfg/"

          tsql = self.newcur(host, config)
          tsql.execute("drop database if exists %s" % dbname)
+         tsql.execute("create database %s vgroups %d" % (dbname, vgroups))
          tsql.execute("use %s" % dbname)
          tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)" % stbname)
@@ -182,7 +189,52 @@ class TDTestCase:
          tdLog.debug("INSERT TABLE DATA ............ [OK]")
          return

+     def taosBench(self, jsonFile):
+         buildPath = self.getBuildPath()
+         if (buildPath == ""):
+             tdLog.exit("taosd not found!")
+         else:
+             tdLog.info("taosd found in %s" % buildPath)
+         taosBenchbin = buildPath + "/build/bin/taosBenchmark"
+         os.system("%s -f %s -y " % (taosBenchbin, jsonFile))
+
+         return
+
+     def taosBenchCreate(self, dbname, stbname, vgroups, threadNumbers, count):
+         # count=50000
+         buildPath = self.getBuildPath()
+         if (buildPath == ""):
+             tdLog.exit("taosd not found!")
+         else:
+             tdLog.info("taosd found in %s" % buildPath)
+         taosBenchbin = buildPath + "/build/bin/taosBenchmark"
+
+         # insert: create one or multiple tables per sql and insert multiple rows per sql
+         tdSql.execute("drop database if exists %s" % dbname)
+
+         tdSql.execute("create database %s vgroups %d" % (dbname, vgroups))
+         tdSql.execute("use %s" % dbname)
+
+         threads = []
+         # threadNumbers=2
+         for i in range(threadNumbers):
+             jsonfile = "1-insert/Vgroups%d%d.json" % (vgroups, i)
+             os.system("cp -f 1-insert/manyVgroups.json %s" % (jsonfile))
+             os.system("sed -i 's/\"name\": \"db\",/\"name\": \"%s%d\",/g' %s" % (dbname, i, jsonfile))
+             os.system("sed -i 's/\"childtable_count\": 300000,/\"childtable_count\": %d,/g' %s " % (count, jsonfile))
+             os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s " % (stbname, i, jsonfile))
+             os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s " % (stbname, i, jsonfile))
+             threads.append(mp.Process(target=self.taosBench, args=("%s" % jsonfile,)))
+         start_time = time.time()
+         for tr in threads:
+             tr.start()
+         for tr in threads:
+             tr.join()
+         end_time = time.time()
+
+         spendTime = end_time - start_time
+         speedCreate = count / spendTime
+         tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]" % (spendTime, count, speedCreate))
+         return
+
      # test case1 base
      def test_case1(self):
          tdLog.debug("-----create database and tables test------- ")
@@ -284,6 +336,12 @@ class TDTestCase:

          return

+     def test_case3(self):
+
+         self.taosBenchCreate("db1", "stb1", 1, 2, 1*50000)
+
+         return
+
      #
      # add case with filename
      #
@@ -0,0 +1,76 @@ (new file; likely the 1-insert/manyVgroups.json template copied by taosBenchCreate above)
{
  "filetype": "insert",
  "cfgdir": "/etc/taos/",
  "host": "test216",
  "port": 6030,
  "user": "root",
  "password": "taosdata",
  "thread_count": 8,
  "thread_count_create_tbl": 8,
  "result_file": "./insert_res.txt",
  "confirm_parameter_prompt": "no",
  "insert_interval": 0,
  "interlace_rows": 100000,
  "num_of_records_per_req": 100000,
  "databases": [
    {
      "dbinfo": {
        "name": "db",
        "drop": "yes",
        "vgroups": 1
      },
      "super_tables": [
        {
          "name": "stb1",
          "child_table_exists": "no",
          "childtable_count": 300000,
          "childtable_prefix": "stb1_",
          "auto_create_table": "no",
          "batch_create_tbl_num": 50000,
          "data_source": "rand",
          "insert_mode": "taosc",
          "insert_rows": 0,
          "interlace_rows": 0,
          "insert_interval": 0,
          "max_sql_len": 10000000,
          "disorder_ratio": 0,
          "disorder_range": 1000,
          "timestamp_step": 10,
          "sample_format": "csv",
          "use_sample_ts": "no",
          "tags_file": "",
          "columns": [
            {
              "type": "INT"
            },
            {
              "type": "DOUBLE",
              "count": 100
            },
            {
              "type": "BINARY",
              "len": 400,
              "count": 10
            },
            {
              "type": "nchar",
              "len": 200,
              "count": 20
            }
          ],
          "tags": [
            {
              "type": "TINYINT",
              "count": 2
            },
            {
              "type": "BINARY",
              "len": 16,
              "count": 2
            }
          ]
        }
      ]
    }
  ]
}
@@ -0,0 +1,299 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-


import sys
import os
selfPath = os.path.dirname(os.path.realpath(__file__))
utilPath = "%s/../../pytest/" % selfPath
import threading
import multiprocessing as mp
from numpy.lib.function_base import insert
import taos
sys.path.append(utilPath)
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import datetime as dt
import time

# constant define
WAITS = 5  # wait seconds


class TDTestCase:
    #
    # --------------- main frame -------------------
    #

    def caseDescription(self):
        '''
        limit and offset keyword function test cases;
        case1: limit offset base function test
        case2: offset return valid
        '''
        return

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    # init
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        # tdSql.init(conn.cursor())
        # tdSql.prepare()
        # self.create_tables()
        self.ts = 1500000000000

    # run case
    def run(self):

        # test base case
        self.test_case1()
        tdLog.debug(" LIMIT test_case1 ............ [OK]")

        # test advance case
        # self.test_case2()
        # tdLog.debug(" LIMIT test_case2 ............ [OK]")

    # stop
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

    # --------------- case -------------------

    # create tables
    def create_tables(self, dbname, stbname, count):
        tdSql.execute("use %s" % dbname)
        tdSql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)" % stbname)
        pre_create = "create table"
        sql = pre_create
        tdLog.debug("doing create one stable %s and %d child table in %s ..." % (stbname, count, dbname))
        # print(time.time())
        exeStartTime = time.time()
        for i in range(count):
            sql += " %s_%d using %s tags(%d)" % (stbname, i, stbname, i + 1)
            if i > 0 and i % 3000 == 0:
                tdSql.execute(sql)
                sql = pre_create
        # print(time.time())
        # end sql
        if sql != pre_create:
            tdSql.execute(sql)
        exeEndTime = time.time()
        spendTime = exeEndTime - exeStartTime
        speedCreate = count / spendTime
        tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]" % (spendTime, count, speedCreate))
        return

    def newcur(self, host, cfg):
        user = "root"
        password = "taosdata"
        port = 6030
        con = taos.connect(host=host, user=user, password=password, config=cfg, port=port)
        cur = con.cursor()
        print(cur)
        return cur

    def new_create_tables(self, dbname, vgroups, stbname, tcountStart, tcountStop):
        host = "127.0.0.1"
        buildPath = self.getBuildPath()
        config = buildPath + "../sim/dnode1/cfg/"

        tsql = self.newcur(host, config)
        tsql.execute("drop database if exists %s" % (dbname))
        tsql.execute("create database if not exists %s vgroups %d" % (dbname, vgroups))
        tsql.execute("use %s" % dbname)
        tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)" % stbname)

        pre_create = "create table"
        sql = pre_create
        tcountStop = int(tcountStop)
        tcountStart = int(tcountStart)
        count = tcountStop - tcountStart

        tdLog.debug("doing create one stable %s and %d child table in %s ..." % (stbname, count, dbname))
        # print(time.time())
        exeStartTime = time.time()
        # print(type(tcountStop), type(tcountStart))
        for i in range(tcountStart, tcountStop):
            sql += " %s_%d using %s tags(%d)" % (stbname, i, stbname, i + 1)
            if i > 0 and i % 20000 == 0:
                # print(sql)
                tsql.execute(sql)
                sql = pre_create
        # print(time.time())
        # end sql
        if sql != pre_create:
            # print(sql)
            tsql.execute(sql)
        exeEndTime = time.time()
        spendTime = exeEndTime - exeStartTime
        speedCreate = count / spendTime
        # tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]" % (spendTime, count, speedCreate))
        return

    # insert data
    def insert_data(self, dbname, stbname, ts_start, tcountStart, tcountStop, rowCount):
        tdSql.execute("use %s" % dbname)
        pre_insert = "insert into "
        sql = pre_insert
        tcount = tcountStop - tcountStart
        allRows = tcount * rowCount
        tdLog.debug("doing insert data into stable:%s rows:%d ..." % (stbname, allRows))
        exeStartTime = time.time()
        for i in range(tcountStart, tcountStop):
            sql += " %s_%d values " % (stbname, i)
            for j in range(rowCount):
                sql += "(%d, %d, 'taos_%d') " % (ts_start + j * 1000, j, j)
                if j > 0 and j % 5000 == 0:
                    # print(sql)
                    tdSql.execute(sql)
                    sql = "insert into %s_%d values " % (stbname, i)
        # end sql
        if sql != pre_insert:
            # print(sql)
            tdSql.execute(sql)
        exeEndTime = time.time()
        spendTime = exeEndTime - exeStartTime
        speedInsert = allRows / spendTime
        # tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]" % (spendTime, allRows, speedInsert))

        tdLog.debug("INSERT TABLE DATA ............ [OK]")
        return

    # test case1 base
    def test_case1(self):
        tdLog.debug("-----create database and tables test------- ")
        # tdSql.execute("drop database if exists db1")
        # tdSql.execute("drop database if exists db4")
        # tdSql.execute("drop database if exists db6")
        # tdSql.execute("drop database if exists db8")
        # tdSql.execute("drop database if exists db12")
        # tdSql.execute("drop database if exists db16")

        # create database and tables;

        # tdSql.execute("create database db11 vgroups 1")
        # # self.create_tables("db1", "stb1", 30*10000)
        # tdSql.execute("use db1")
        # tdSql.execute("create stable stb1(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)")

        # tdSql.execute("create database db12 vgroups 1")
        # # self.create_tables("db1", "stb1", 30*10000)
        # tdSql.execute("use db1")

        # t1 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(1,))
        # t2 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(2,))
        # t1 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", 0, count/2,))
        # t2 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", count/2, count,))

        count = 500000
        vgroups = 1
        threads = []
        threadNumbers = 2
        for i in range(threadNumbers):
            threads.append(mp.Process(target=self.new_create_tables, args=("db1%d" % i, vgroups, "stb1", 0, count,)))
        start_time = time.time()
        for tr in threads:
            tr.start()
        for tr in threads:
            tr.join()
        end_time = time.time()
        spendTime = end_time - start_time
        speedCreate = count / spendTime
        tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]" % (spendTime, count, speedCreate))
        # self.new_create_tables("db1", "stb1", 15*10000)
        # self.new_create_tables("db1", "stb1", 15*10000)

        # tdSql.execute("create database db4 vgroups 4")
        # self.create_tables("db4", "stb4", 30*10000)

        # tdSql.execute("create database db6 vgroups 6")
        # self.create_tables("db6", "stb6", 30*10000)

        # tdSql.execute("create database db8 vgroups 8")
        # self.create_tables("db8", "stb8", 30*10000)

        # tdSql.execute("create database db12 vgroups 12")
        # self.create_tables("db12", "stb12", 30*10000)

        # tdSql.execute("create database db16 vgroups 16")
        # self.create_tables("db16", "stb16", 30*10000)
        return

    # test case2 base: insert data
    def test_case2(self):

        tdLog.debug("-----insert data test------- ")
        # drop database
        tdSql.execute("drop database if exists db1")
        tdSql.execute("drop database if exists db4")
        tdSql.execute("drop database if exists db6")
        tdSql.execute("drop database if exists db8")
        tdSql.execute("drop database if exists db12")
        tdSql.execute("drop database if exists db16")

        # create database and tables;

        tdSql.execute("create database db1 vgroups 1")
        self.create_tables("db1", "stb1", 1*100)
        self.insert_data("db1", "stb1", self.ts, 1*50, 1*10000)

        tdSql.execute("create database db4 vgroups 4")
        self.create_tables("db4", "stb4", 1*100)
        self.insert_data("db4", "stb4", self.ts, 1*100, 1*10000)

        tdSql.execute("create database db6 vgroups 6")
        self.create_tables("db6", "stb6", 1*100)
        self.insert_data("db6", "stb6", self.ts, 1*100, 1*10000)

        tdSql.execute("create database db8 vgroups 8")
        self.create_tables("db8", "stb8", 1*100)
        self.insert_data("db8", "stb8", self.ts, 1*100, 1*10000)

        tdSql.execute("create database db12 vgroups 12")
        self.create_tables("db12", "stb12", 1*100)
        self.insert_data("db12", "stb12", self.ts, 1*100, 1*10000)

        tdSql.execute("create database db16 vgroups 16")
        self.create_tables("db16", "stb16", 1*100)
        self.insert_data("db16", "stb16", self.ts, 1*100, 1*10000)

        return

    #
    # add case with filename
    #
# tdCases.addWindows(__file__, TDTestCase())
# tdCases.addLinux(__file__, TDTestCase())
case = TDTestCase()
case.test_case1()
@@ -0,0 +1,287 @@
import datetime

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]


class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __concat_condition(self):  # sourcery skip: extract-method
        concat_condition = []
        for char_col in CHAR_COL:
            concat_condition.extend(
                (
                    char_col,
                    f"upper( {char_col} )",
                )
            )
            concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL )
            concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
            concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )

        for num_col in NUM_COL:
            concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )

        concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )

        concat_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')

        return concat_condition

    def __where_condition(self, col):
        # return f" where count({col}) > 0 "
        return ""

    def __concat_num(self, concat_lists, num):
        return [ concat_lists[i] for i in range(num) ]

    def __group_condition(self, col, having=""):
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __concat_check(self, tbname, num):
        concat_condition = self.__concat_condition()
        for i in range(len(concat_condition) - num + 1):
            condition = self.__concat_num(concat_condition[i:], num)
            concat_filter = f"concat( {','.join( condition ) }) "
            where_condition = self.__where_condition(condition[0])
            # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
            concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " )
            # group_no_having = self.__group_condition(condition[0] )
            concat_group_no_having = self.__group_condition(concat_filter)
            groups = ["", concat_group_having, concat_group_no_having]

            if num > 8 or num < 2:
                [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]

            tdSql.query(f"select {','.join(condition)} from {tbname} ")
            rows = tdSql.queryRows
            concat_data = []
            for m in range(rows):
                concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None)
            tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ")
            tdSql.checkRows(rows)
            for j in range(tdSql.queryRows):
                assert tdSql.getData(j, 0) in concat_data

            [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]

    def __concat_err_check(self, tbname):
        sqls = []

        for char_col in CHAR_COL:
            sqls.extend(
                (
                    f"select concat( {char_col} ) from {tbname} ",
                    f"select concat(ceil( {char_col} )) from {tbname} ",
                    f"select {char_col} from {tbname} group by concat( {char_col} ) ",
                )
            )

            sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
            sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
            sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )

        sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL )
        sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )

        sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
        sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
        sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL )
        sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
        sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
        sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
        sqls.extend(
            (
                f"select concat() from {tbname} ",
                f"select concat(*) from {tbname} ",
                f"select concat(ccccccc) from {tbname} ",
                f"select concat(111) from {tbname} ",
            )
        )

        return sqls

    def __test_current(self):  # sourcery skip: use-itertools-product
        tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
        for tb in tbname:
            for i in range(2, 8):
                self.__concat_check(tb, i)
                tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")

    def __test_error(self):
        tdLog.printNoPrefix("==========err sql condition check , must return error==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]

        for tb in tbname:
            for errsql in self.__concat_err_check(tb):
                tdSql.error(sql=errsql)
            self.__concat_check(tb, 1)
            self.__concat_check(tb, 9)
            tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__test_current()
        self.__test_error()

    def __create_tb(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
            )
            (
                { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
            )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
                '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        # tdDnodes.stop(1)
        # tdDnodes.start(1)

        # tdSql.execute("use db")

        # tdLog.printNoPrefix("==========step4:after wal, all check again ")
        # self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,287 @@
|
|||
import datetime  # used below for millisecond timestamps; kept explicit rather than relying on the util star-imports

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __concat_ws_condition(self):  # sourcery skip: extract-method
        concat_ws_condition = []
        for char_col in CHAR_COL:
            concat_ws_condition.extend(
                (
                    char_col,
                    f"upper( {char_col} )",
                )
            )
            concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL )
            concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
            concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )

        for num_col in NUM_COL:
            concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )

        concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )

        concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')

        return concat_ws_condition
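
    # Illustrative expansion (annotation): with the c1..c10 schema above, the
    # builder yields SQL expressions such as
    #   c8, upper( c8 ), cast( c1 as binary(16) ), cast( c8 + c1 as binary(16) ), ...
    # plus one quoted literal -- each one a legal argument for concat_ws().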
    def __where_condition(self, col):
        # return f" where count({col}) > 0 "
        return ""

    def __concat_ws_num(self, concat_ws_lists, num):
        return [ concat_ws_lists[i] for i in range(num) ]

    def __group_condition(self, col, having=""):
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __concat_ws_check(self, tbname, num):
        concat_ws_condition = self.__concat_ws_condition()
        for i in range(len(concat_ws_condition) - num + 1):
            condition = self.__concat_ws_num(concat_ws_condition[i:], num)
            concat_ws_filter = f"concat_ws('_', {','.join( condition )}) "
            where_condition = self.__where_condition(condition[0])
            # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
            concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
            # group_no_having = self.__group_condition(condition[0])
            concat_ws_group_no_having = self.__group_condition(concat_ws_filter)
            groups = ["", concat_ws_group_having, concat_ws_group_no_having]

            if num > 8 or num < 2:
                [ tdSql.error(f"select concat_ws('_', {','.join( condition )}) from {tbname} {where_condition} {group} ") for group in groups ]
                continue  # an out-of-range argument count must fail; skip the positive checks below

            tdSql.query(f"select {','.join(condition)} from {tbname} ")
            rows = tdSql.queryRows
            concat_ws_data = []
            for m in range(rows):
                concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
            tdSql.query(f"select concat_ws('_', {','.join( condition )}) from {tbname} ")
            tdSql.checkRows(rows)
            for j in range(tdSql.queryRows):
                assert tdSql.getData(j, 0) in concat_ws_data

            [ tdSql.query(f"select concat_ws('_', {','.join( condition )}) from {tbname} {where_condition} {group} ") for group in groups ]
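
    # For example (hypothetical expansion, assuming table ct1), one statement the
    # loop above generates with num=2 is:
    #   select concat_ws('_', c8, upper( c8 )) from ct1 group by concat_ws('_', c8, upper( c8 ))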
    def __concat_ws_err_check(self, tbname):
        sqls = []

        for char_col in CHAR_COL:
            sqls.extend(
                (
                    f"select concat_ws('_', {char_col} ) from {tbname} ",
                    f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
                    f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
                )
            )

            sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
            sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
            sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )

        sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL )
        # the next three comprehensions originally reused one loop variable twice;
        # distinct names generate all pairwise combinations instead of self-pairs only
        sqls.extend( f"select concat_ws('_', {num_col} , {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL )
        sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL )
        sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL )

        sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
        sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL )
        sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL )
        sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
        sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
        sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
        sqls.extend(
            (
                f"select concat_ws('_', ) from {tbname} ",
                f"select concat_ws('_', *) from {tbname} ",
                f"select concat_ws('_', ccccccc) from {tbname} ",
                f"select concat_ws('_', 111) from {tbname} ",
            )
        )

        return sqls
    def __test_current(self):  # sourcery skip: use-itertools-product
        tdLog.printNoPrefix("==========current sql condition check, must return query ok==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
        for tb in tbname:
            for i in range(2, 8):
                self.__concat_ws_check(tb, i)
                tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")

    def __test_error(self):
        tdLog.printNoPrefix("==========err sql condition check, must return error==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]

        for tb in tbname:
            for errsql in self.__concat_ws_err_check(tb):
                tdSql.error(sql=errsql)
            self.__concat_ws_check(tb, 1)
            self.__concat_ws_check(tb, 9)
            tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__test_current()
        self.__test_error()
    def __create_tb(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { -3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127 }, { i * 1.11111 }, { i * 1000.1111 }, { i % 2 },
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
            '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )
    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        # tdDnodes.stop(1)
        # tdDnodes.start(1)

        # tdSql.execute("use db")

        # tdLog.printNoPrefix("==========step4:after wal, all check again ")
        # self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,267 @@
import datetime  # used below for millisecond timestamps; kept explicit rather than relying on the util star-imports

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __ltrim_condition(self):  # sourcery skip: extract-method
        ltrim_condition = []
        for char_col in CHAR_COL:
            ltrim_condition.extend(
                (
                    char_col,
                    f"upper( {char_col} )",
                )
            )
            ltrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL )
            ltrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
            ltrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
            ltrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            ltrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            ltrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            # ltrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            ltrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
            ltrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )

        for num_col in NUM_COL:
            ltrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            ltrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL )

        ltrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )

        ltrim_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')

        return ltrim_condition
    def __where_condition(self, col):
        # return f" where count({col}) > 0 "
        return ""

    def __group_condition(self, col, having=""):
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __ltrim_check(self, tbname):
        ltrim_condition = self.__ltrim_condition()
        for condition in ltrim_condition:
            where_condition = self.__where_condition(condition)
            ltrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
            ltrim_group_no_having = self.__group_condition(condition)
            groups = ["", ltrim_group_having, ltrim_group_no_having]

            tdSql.query(f"select ltrim( {condition}) , {condition} from {tbname} ")
            for j in range(tdSql.queryRows):
                tdSql.checkData(j, 0, tdSql.getData(j, 1).lstrip()) if tdSql.getData(j, 1) else tdSql.checkData(j, 0, None)

            [ tdSql.query(f"select ltrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ]
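
    # Sanity reference for the check above (annotation): ltrim strips leading
    # whitespace only, matching Python's str.lstrip(), so the literal
    # " test1234..." value loses its leading blank but keeps the trailing one.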
    def __ltrim_err_check(self, tbname):
        sqls = []

        for num_col in NUM_COL:
            sqls.extend(
                (
                    f"select ltrim( {num_col} ) from {tbname} ",
                    f"select ltrim(ceil( {num_col} )) from {tbname} ",
                    f"select {num_col} from {tbname} group by ltrim( {num_col} ) ",
                )
            )

            sqls.extend( f"select ltrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL )
            sqls.extend( f"select ltrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
            sqls.extend( f"select ltrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )

        sqls.extend( f"select ltrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select ltrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select ltrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL )
        # distinct loop variables below replace the original shadowed ones, so all
        # pairwise combinations are generated rather than self-pairs only
        sqls.extend( f"select ltrim( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL )
        sqls.extend( f"select ltrim( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL )
        sqls.extend( f"select ltrim( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL )

        sqls.extend( f"select ltrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
        sqls.extend( f"select ltrim({num_col}, '1') from {tbname} " for num_col in NUM_COL )
        sqls.extend( f"select ltrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
        sqls.extend( f"select ltrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
        sqls.extend( f"select ltrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
        sqls.extend(
            (
                f"select ltrim() from {tbname} ",
                f"select ltrim(*) from {tbname} ",
                f"select ltrim(ccccccc) from {tbname} ",
                f"select ltrim(111) from {tbname} ",
            )
        )

        return sqls
    def __test_current(self):  # sourcery skip: use-itertools-product
        tdLog.printNoPrefix("==========current sql condition check, must return query ok==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
        for tb in tbname:
            self.__ltrim_check(tb)
            tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")

    def __test_error(self):
        tdLog.printNoPrefix("==========err sql condition check, must return error==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]

        for tb in tbname:
            for errsql in self.__ltrim_err_check(tb):
                tdSql.error(sql=errsql)
            tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__test_current()
        self.__test_error()
    def __create_tb(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { -3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127 }, { i * 1.11111 }, { i * 1000.1111 }, { i % 2 },
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
            '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )
    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,267 @@
import datetime  # used below for millisecond timestamps; kept explicit rather than relying on the util star-imports

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __rtrim_condition(self):  # sourcery skip: extract-method
        rtrim_condition = []
        for char_col in CHAR_COL:
            rtrim_condition.extend(
                (
                    char_col,
                    f"upper( {char_col} )",
                )
            )
            rtrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL )
            rtrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
            rtrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
            rtrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            rtrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            rtrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            # rtrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            rtrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
            rtrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )

        for num_col in NUM_COL:
            rtrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            rtrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL )

        rtrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )

        rtrim_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')

        return rtrim_condition
    def __where_condition(self, col):
        # return f" where count({col}) > 0 "
        return ""

    def __group_condition(self, col, having=""):
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __rtrim_check(self, tbname):
        rtrim_condition = self.__rtrim_condition()
        for condition in rtrim_condition:
            where_condition = self.__where_condition(condition)
            rtrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
            rtrim_group_no_having = self.__group_condition(condition)
            groups = ["", rtrim_group_having, rtrim_group_no_having]

            tdSql.query(f"select rtrim( {condition}) , {condition} from {tbname} ")
            for j in range(tdSql.queryRows):
                tdSql.checkData(j, 0, tdSql.getData(j, 1).rstrip()) if tdSql.getData(j, 1) else tdSql.checkData(j, 0, None)

            [ tdSql.query(f"select rtrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ]
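
    # Sanity reference for the check above (annotation): rtrim strips trailing
    # whitespace only, matching Python's str.rstrip(), so leading and embedded
    # blanks are preserved.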
    def __rtrim_err_check(self, tbname):
        sqls = []

        for num_col in NUM_COL:
            sqls.extend(
                (
                    f"select rtrim( {num_col} ) from {tbname} ",
                    f"select rtrim(ceil( {num_col} )) from {tbname} ",
                    f"select {num_col} from {tbname} group by rtrim( {num_col} ) ",
                )
            )

            sqls.extend( f"select rtrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL )
            sqls.extend( f"select rtrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
            sqls.extend( f"select rtrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )

        sqls.extend( f"select rtrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select rtrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select rtrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL )
        # distinct loop variables below replace the original shadowed ones, so all
        # pairwise combinations are generated rather than self-pairs only
        sqls.extend( f"select rtrim( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL )
        sqls.extend( f"select rtrim( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL )
        sqls.extend( f"select rtrim( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL )

        sqls.extend( f"select rtrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
        sqls.extend( f"select rtrim({num_col}, '1') from {tbname} " for num_col in NUM_COL )
        sqls.extend( f"select rtrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
        sqls.extend( f"select rtrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
        sqls.extend( f"select rtrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
        sqls.extend(
            (
                f"select rtrim() from {tbname} ",
                f"select rtrim(*) from {tbname} ",
                f"select rtrim(ccccccc) from {tbname} ",
                f"select rtrim(111) from {tbname} ",
            )
        )

        return sqls
    def __test_current(self):  # sourcery skip: use-itertools-product
        tdLog.printNoPrefix("==========current sql condition check, must return query ok==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
        for tb in tbname:
            self.__rtrim_check(tb)
            tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")

    def __test_error(self):
        tdLog.printNoPrefix("==========err sql condition check, must return error==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]

        for tb in tbname:
            for errsql in self.__rtrim_err_check(tb):
                tdSql.error(sql=errsql)
            tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__test_current()
        self.__test_error()
    def __create_tb(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { -3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127 }, { i * 1.11111 }, { i * 1000.1111 }, { i % 2 },
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
            '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )
    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,271 @@
import datetime  # used below for millisecond timestamps; kept explicit rather than relying on the util star-imports

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *


PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __substr_condition(self):  # sourcery skip: extract-method
        substr_condition = []
        for char_col in CHAR_COL:
            substr_condition.extend(
                (
                    char_col,
                    f"upper( {char_col} )",
                )
            )
            substr_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL )
            substr_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
            substr_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL )
            substr_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            substr_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            substr_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            # substr_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
            substr_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
            substr_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL )

        for num_col in NUM_COL:
            substr_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
            substr_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL )

        substr_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )

        substr_condition.append(''' " test1234!@#$%^&*() :'><?/.,][}{ " ''')

        return substr_condition
    def __where_condition(self, col):
        # return f" where count({col}) > 0 "
        return ""

    def __group_condition(self, col, having=""):
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __substr_check(self, tbname, pos, lens=None):
        substr_condition = self.__substr_condition()
        for condition in substr_condition:
            where_condition = self.__where_condition(condition)
            substr_group_having = self.__group_condition(condition, having=f"{condition} is not null " )
            substr_group_no_having = self.__group_condition(condition)
            groups = ["", substr_group_having, substr_group_no_having]

            if pos < 1:
                tdSql.error(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ")
                continue  # a non-positive start position must fail; skip the positive checks below

            tdSql.query(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ")
            for j in range(tdSql.queryRows):
                # substr positions are 1-based and lens counts characters from pos, hence
                # the slice [pos-1 : pos-1+lens]; the original [pos-1:lens] is equivalent
                # only for pos == 1, the sole positive value used by the callers below
                tdSql.checkData(j, 0, tdSql.getData(j, 1)[pos-1:pos-1+lens]) if tdSql.getData(j, 1) else tdSql.checkData(j, 0, None)

            [ tdSql.query(f"select substr({condition}, {pos}, {lens}) from {tbname} {where_condition} {group} ") for group in groups ]
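
    # Worked example for the slice above (annotation): for a stored value
    # "binary_limit-1", substr(x, 1, 6) should yield "binary", which equals the
    # Python slice "binary_limit-1"[0:6].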
    def __substr_err_check(self, tbname):
        sqls = []

        for num_col in NUM_COL:
            sqls.extend(
                (
                    f"select substr( {num_col} ) from {tbname} ",
                    f"select substr(ceil( {num_col} )) from {tbname} ",
                    f"select {num_col} from {tbname} group by substr( {num_col} ) ",
                )
            )

            sqls.extend( f"select substr( {char_col} + {num_col} ) from {tbname} " for char_col in CHAR_COL )
            sqls.extend( f"select substr( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
            sqls.extend( f"select substr( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )

        sqls.extend( f"select substr( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
        sqls.extend( f"select substr( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL )
        sqls.extend( f"select substr( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL )
        # distinct loop variables below replace the original shadowed ones, so all
        # pairwise combinations are generated rather than self-pairs only
        sqls.extend( f"select substr( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL )
        sqls.extend( f"select substr( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL )
        sqls.extend( f"select substr( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL )

        sqls.extend( f"select substr( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
        sqls.extend( f"select substr({num_col}, '1') from {tbname} " for num_col in NUM_COL )
        sqls.extend( f"select substr({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
        sqls.extend( f"select substr({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
        sqls.extend( f"select substr({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
        sqls.extend(
            (
                f"select substr() from {tbname} ",
                f"select substr(*) from {tbname} ",
                f"select substr(ccccccc) from {tbname} ",
                f"select substr(111) from {tbname} ",
            )
        )

        return sqls
    def __test_current(self):  # sourcery skip: use-itertools-product
        tdLog.printNoPrefix("==========current sql condition check, must return query ok==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
        for tb in tbname:
            self.__substr_check(tb, 1, 6)
            tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")

    def __test_error(self):
        tdLog.printNoPrefix("==========err sql condition check, must return error==========")
        tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]

        for tb in tbname:
            for errsql in self.__substr_err_check(tb):
                tdSql.error(sql=errsql)
            self.__substr_check(tb, 0, 6)
            tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__test_current()
        self.__test_error()
    def __create_tb(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { -3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127 }, { i * 1.11111 }, { i * 1000.1111 }, { i % 2 },
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
            '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
                )
            (
                { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
                )
            '''
        )
    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -6,11 +6,13 @@ python3 ./test.py -f 0-others/taosShell.py
python3 ./test.py -f 0-others/taosShellError.py
python3 ./test.py -f 0-others/taosShellNetChk.py
python3 ./test.py -f 0-others/telemetry.py

python3 ./test.py -f 0-others/taosdMonitor.py

#python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py

python3 ./test.py -f 2-query/timezone.py
python3 ./test.py -f 2-query/Now.py
@@ -23,8 +25,7 @@ python3 ./test.py -f 2-query/last.py
python3 ./test.py -f 2-query/To_unixtimestamp.py
python3 ./test.py -f 2-query/timetruncate.py

python3 ./test.py -f 2-query/Timediff.py
# python3 ./test.py -f 2-query/diff.py
# python3 ./test.py -f 2-query/Timediff.py
#python3 ./test.py -f 2-query/cast.py
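
# Usage note (annotation; assumes this repo's standard test harness layout): any
# case registered above can also be run on its own, e.g.
#   python3 ./test.py -f 2-query/ltrim.py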