[td-225]merge develop

Haojun Liao 2021-04-27 22:17:09 +08:00
commit 9286f77e07
68 changed files with 3976 additions and 241 deletions
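
Nearly every hunk below makes the same change to the client-side logging: instead of printing the raw SSqlObj pointer with %p, the log line now prints the object's 64-bit id (pSql->self) with the 0x%"PRIx64" format, spliced into the message by C string-literal concatenation. The following is a minimal, self-contained sketch of that pattern; the SSqlObj struct and the tscError macro here are simplified stand-ins for illustration only, not the real TDengine definitions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in: the real SSqlObj carries far more state */
typedef struct SSqlObj {
  uint64_t self;  /* 64-bit ref id of the query object, stable for its lifetime */
} SSqlObj;

/* stand-in for the client log macro; the real tscError is printf-like */
#define tscError(fmt, ...) fprintf(stderr, "TSC ERROR " fmt "\n", ##__VA_ARGS__)

static void report(SSqlObj *pSql) {
  /* before: identifies the query by its heap address, which may be reused */
  tscError("%p failed to malloc sql string buffer", (void *)pSql);
  /* after: identifies the query by its id; "0x%" PRIx64 " ..." concatenates
   * the PRIx64 width macro into the format string */
  tscError("0x%" PRIx64 " failed to malloc sql string buffer", pSql->self);
}

int main(void) {
  SSqlObj sql = {.self = 0x2a};
  report(&sql);
  return 0;
}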


@@ -64,7 +64,7 @@
  # monitorInterval 30
  # number of seconds allowed for a dnode to be offline, for cluster only
- # offlineThreshold 8640000
+ # offlineThreshold 864000
  # RPC re-try timer, millisecond
  # rpcTimer 300
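A quick unit check on this change, assuming the value is in seconds as the comment above states: 864000 s / 86400 s per day = 10 days, whereas the previous default of 8640000 s worked out to 100 days.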


@@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
  pSql->sqlstr = calloc(1, sqlLen + 1);
  if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
  pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
  tscAsyncResultOnError(pSql);
  return;
@@ -81,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
  TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
  STscObj *pObj = (STscObj *)taos;
  if (pObj == NULL || pObj->signature != pObj) {
- tscError("bug!!! pObj:%p", pObj);
+ tscError("pObj:%p is NULL or freed", pObj);
  terrno = TSDB_CODE_TSC_DISCONNECTED;
  tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
  return NULL;
@@ -288,7 +288,7 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) {
  }
  assert(pSql->res.code != TSDB_CODE_SUCCESS);
- tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
+ tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));
  SSqlRes *pRes = &pSql->res;
  if (pSql->fp == NULL || pSql->fetchFp == NULL){
@@ -368,7 +368,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
  SSqlObj *sub = (SSqlObj*) res;
  const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
  if (code != TSDB_CODE_SUCCESS) {
- tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
+ tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
  goto _error;
  }


@@ -926,7 +926,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
  pRes->code = tscProcessServStatus(pSql);
  } else {
  pRes->code = TSDB_CODE_TSC_INVALID_SQL;
- tscError("%p not support command:%d", pSql, pCmd->command);
+ tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
  }
  // keep the code in local variable in order to avoid invalid read in case of async query


@@ -113,14 +113,14 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
  if (pMemBuffer == NULL) {
  tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("%p pMemBuffer is NULL", pMemBuffer);
+ tscError("pMemBuffer:%p is NULL", pMemBuffer);
  pRes->code = TSDB_CODE_TSC_APP_ERROR;
  return;
  }
  if (pDesc->pColumnModel == NULL) {
  tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
- tscError("%p no local buffer or intermediate result format model", pSql);
+ tscError("0x%"PRIx64" no local buffer or intermediate result format model", pSql->self);
  pRes->code = TSDB_CODE_TSC_APP_ERROR;
  return;
  }
@@ -144,7 +144,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
  }
  if (pDesc->pColumnModel->capacity >= pMemBuffer[0]->pageSize) {
- tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
+ tscError("0x%"PRIx64" Invalid value of buffer capacity %d and page size %d ", pSql->self, pDesc->pColumnModel->capacity,
  pMemBuffer[0]->pageSize);
  tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
@@ -154,9 +154,9 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
  size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
- SLocalMerger *pMerger = (SLocalMerger *) calloc(1, size);
- if (pMerger == NULL) {
- tscError("%p failed to create local merge structure, out of memory", pSql);
+ SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
+ if (pReducer == NULL) {
+ tscError("0x%"PRIx64" failed to create local merge structure, out of memory", pSql->self);
  tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
  pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -180,7 +180,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
  for (int32_t j = 0; j < numOfFlushoutInFile; ++j) {
  SLocalDataSource *ds = (SLocalDataSource *)malloc(sizeof(SLocalDataSource) + pMemBuffer[0]->pageSize);
  if (ds == NULL) {
- tscError("%p failed to create merge structure", pSql);
+ tscError("0x%"PRIx64" failed to create merge structure", pSql->self);
  pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
  tfree(pMerger);
  return;
@@ -539,7 +539,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
  (*pMemBuffer) = (tExtMemBuffer **)malloc(POINTER_BYTES * pSql->subState.numOfSub);
  if (*pMemBuffer == NULL) {
- tscError("%p failed to allocate memory", pSql);
+ tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
  pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
  return pRes->code;
  }
@@ -548,7 +548,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
  pSchema = (SSchema *)calloc(1, sizeof(SSchema) * size);
  if (pSchema == NULL) {
- tscError("%p failed to allocate memory", pSql);
+ tscError("0x%"PRIx64" failed to allocate memory", pSql->self);
  pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
  return pRes->code;
  }


@@ -1147,7 +1147,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
  return code;
  }
- tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
+ tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
  pCmd->curSql = NULL;
  goto _clean;
  }
@@ -1415,7 +1415,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
  assert(pSql->res.numOfRows == 0);
  int32_t ret = fseek(fp, 0, SEEK_SET);
  if (ret < 0) {
- tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
+ tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
  code = TAOS_SYSTEM_ERROR(errno);
  goto _error;
  }
@@ -1536,7 +1536,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
  FILE *fp = fopen(pCmd->payload, "rb");
  if (fp == NULL) {
  pSql->res.code = TAOS_SYSTEM_ERROR(errno);
- tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
+ tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
  tfree(pSupporter);
  taos_free_result(pNew);


@@ -104,7 +104,7 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
  char *sql = malloc(sqlSize);
  if (sql == NULL) {
- tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
+ tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self);
  return;
  }


@@ -5222,7 +5222,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
  int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for alter table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }


@@ -222,7 +222,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
  assert(online <= total);
  if (online < total) {
- tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online);
+ tscError("0x%"PRIx64", HB, total dnode:%d, online dnode:%d", pSql->self, total, online);
  pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
  }
@@ -274,7 +274,7 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
  taosReleaseRef(tscObjRef, pObj->hbrid);
  if (code != TSDB_CODE_SUCCESS) {
- tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code));
+ tscError("0x%"PRIx64" failed to sent HB to server, reason:%s", pHB->self, tstrerror(code));
  }
  taosReleaseRef(tscRefId, rid);
@@ -286,7 +286,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
  char *pMsg = rpcMallocCont(pCmd->payloadLen);
  if (NULL == pMsg) {
- tscError("%p msg:%s malloc failed", pSql, taosMsg[pSql->cmd.msgType]);
+ tscError("0x%"PRIx64" msg:%s malloc failed", pSql->self, taosMsg[pSql->cmd.msgType]);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -370,11 +370,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  rpcMsg->code == TSDB_CODE_APP_NOT_READY)) {
  pSql->retry++;
- tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry);
+ tscWarn("0x%"PRIx64" it shall renew table meta, code:%s, retry:%d", pSql->self, tstrerror(rpcMsg->code), pSql->retry);
  pSql->res.code = rpcMsg->code;  // keep the previous error code
  if (pSql->retry > pSql->maxRetry) {
- tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
+ tscError("0x%"PRIx64" max retry %d reached, give up", pSql->self, pSql->maxRetry);
  } else {
  // wait for a little bit moment and then retry
  // todo do not sleep in rpc callback thread, add this process into queueu to process
@@ -666,7 +666,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab
  assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
  pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
  } else {
- tscError("%p No vgroup info found", pSql);
+ tscError("0x%"PRIx64" No vgroup info found", pSql->self);
  *succeed = 0;
  return pMsg;
@@ -767,7 +767,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo,
  // the queried table has been removed and a new table with the same name has already been created already
  // return error msg
  if (pExpr->uid != pTableMeta->id.uid) {
- tscError("%p table has already been destroyed", addr);
+ tscError("0x%"PRIx64" table has already been destroyed", addr->self);
  return TSDB_CODE_TSC_INVALID_TABLE_NAME;
  }
@@ -1063,7 +1063,7 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SSqlCmd *pCmd = &pSql->cmd;
  pCmd->payloadLen = sizeof(SCreateDnodeMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1081,7 +1081,7 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SSqlCmd *pCmd = &pSql->cmd;
  pCmd->payloadLen = sizeof(SCreateAcctMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1127,7 +1127,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SCreateUserMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1166,7 +1166,7 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SDropDbMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1188,7 +1188,7 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SCMDropTableMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1209,7 +1209,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SDropDnodeMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1230,7 +1230,7 @@ int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1245,7 +1245,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SUseDbMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1262,7 +1262,7 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SSyncDbMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1281,7 +1281,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SShowMsg) + 100;
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1367,7 +1367,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  // Reallocate the payload size
  size = tscEstimateCreateTableMsgLength(pSql, pInfo);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for create table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for create table msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1466,7 +1466,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
  int size = tscEstimateAlterTableMsgLength(pCmd);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
- tscError("%p failed to malloc for alter table msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for alter table msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1540,7 +1540,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SRetrieveTableMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1662,7 +1662,7 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  pCmd->payloadLen = sizeof(SConnectMsg);
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
- tscError("%p failed to malloc for query msg", pSql);
+ tscError("0x%"PRIx64" failed to malloc for query msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1802,7 +1802,7 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
  int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
  if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
  pthread_mutex_unlock(&pObj->mutex);
- tscError("%p failed to create heartbeat msg", pSql);
+ tscError("0x%"PRIx64" failed to create heartbeat msg", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -1878,7 +1878,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
  STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
  if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
- tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
+ tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
  return TSDB_CODE_TSC_INVALID_VALUE;
  }
@@ -2072,7 +2072,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
  pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
  if (pInfo->vgroupList->numOfVgroups <= 0) {
  //tfree(pInfo->vgroupList);
- tscError("%p empty vgroup info", pSql);
+ tscError("0x%"PRIx64" empty vgroup info", pSql->self);
  } else {
  for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
  // just init, no need to lock
@@ -2388,7 +2388,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code);
  static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
  SSqlObj *pNew = calloc(1, sizeof(SSqlObj));
  if (NULL == pNew) {
- tscError("%p malloc failed for new sqlobj to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for new sqlobj to get table meta", pSql->self);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -2402,7 +2402,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
  pNew->cmd.autoCreated = pSql->cmd.autoCreated;  // create table if not exists
  if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
- tscError("%p malloc failed for payload to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for payload to get table meta", pSql->self);
  tscFreeSqlObj(pNew);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -2415,7 +2415,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
  if (pSql->cmd.autoCreated) {
  int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
  if (code != TSDB_CODE_SUCCESS) {
- tscError("%p malloc failed for new tag data to get table meta", pSql);
+ tscError("0x%"PRIx64" malloc failed for new tag data to get table meta", pSql->self);
  tscFreeSqlObj(pNew);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -2493,7 +2493,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
  char name[TSDB_TABLE_FNAME_LEN] = {0};
  int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
  if (code != TSDB_CODE_SUCCESS) {
- tscError("%p failed to generate the table full name", pSql);
+ tscError("0x%"PRIx64" failed to generate the table full name", pSql->self);
  return TSDB_CODE_TSC_INVALID_SQL;
  }


@@ -588,7 +588,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
  void taos_free_result(TAOS_RES *res) {
  SSqlObj* pSql = (SSqlObj*) res;
  if (pSql == NULL || pSql->signature != pSql) {
- tscError("%p already released sqlObj", res);
+ tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
  return;
  }
@@ -881,15 +881,14 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
  int32_t sqlLen = (int32_t)strlen(sql);
  if (sqlLen > tsMaxSQLStringLen) {
- tscError("%p sql too long", pSql);
+ tscError("0x%"PRIx64" sql too long", pSql->self);
  tfree(pSql);
  return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
  }
  pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
  if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
- tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
  tfree(pSql);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -914,7 +913,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
  }
  if (code != TSDB_CODE_SUCCESS) {
- tscDebug("0x%"PRIx64" Valid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
+ tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
  }
  taos_free_result(pSql);
@@ -1031,14 +1030,14 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
  int32_t tblListLen = (int32_t)strlen(tableNameList);
  if (tblListLen > MAX_TABLE_NAME_LENGTH) {
- tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
+ tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
  tscFreeSqlObj(pSql);
  return TSDB_CODE_TSC_INVALID_SQL;
  }
  char *str = calloc(1, tblListLen + 1);
  if (str == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
  tscFreeSqlObj(pSql);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }


@@ -203,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
  tNameExtractFullName(&pTableMetaInfo->name, name);
  taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
+ tfree(pTableMetaInfo->pTableMeta);
+ tscFreeSqlResult(pStream->pSql);
+ tscFreeSubobj(pStream->pSql);
+ tfree(pStream->pSql->pSubs);
+ pStream->pSql->subState.numOfSub = 0;
  pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
  tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@@ -468,8 +476,8 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
  }
  if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
- tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64,
- pSql->self, pStream, pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
+ tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql->self, pStream,
+ pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
  pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
  }
@@ -601,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
  if (pStream == NULL) {
- tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
+ tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
  tscFreeSqlObj(pSql);
  return NULL;
  }
@@ -617,7 +625,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
  if (pSql->sqlstr == NULL) {
- tscError("%p failed to malloc sql string buffer", pSql);
+ tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
  tscFreeSqlObj(pSql);
  return NULL;
  }


@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
  SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
  if (pNew == NULL) {
- tscError("failed to retrieve table id: cannot create new sql object.");
+ tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
  return NULL;
  } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
- tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
+ tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
  return NULL;
  }


@@ -70,7 +70,7 @@ static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, i
  pthread_mutex_lock(&subState->mutex);
- tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
+ tscDebug("subquery:0x%"PRIx64",%d state set to %d", pSql->self, idx, state);
  subState->states[idx] = state;
@@ -85,11 +85,12 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
  tscDebug("0x%"PRIx64" total subqueries: %d", pParentSql->self, subState->numOfSub);
  for (int i = 0; i < subState->numOfSub; i++) {
  if (0 == subState->states[i]) {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d NOT finished, abort query completion check", pParentSql->self, pParentSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d NOT finished, abort query completion check", pParentSql->self,
+ pParentSql->pSubs[i]->self, i);
  done = false;
  break;
  } else {
- tscDebug("0x%"PRIx64" subquery:%p, index: %d finished", pParentSql->self, pParentSql->pSubs[i], i);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64", index: %d finished", pParentSql->self, pParentSql->pSubs[i]->self, i);
  }
  }
@@ -106,14 +107,15 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
  bool done = allSubqueryDone(pParentSql);
  if (done) {
- tscDebug("0x%"PRIx64" subquery:%p,%d all subs already done", pParentSql->self, pSql, idx);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d all subs already done", pParentSql->self,
+ pSql->self, idx);
  pthread_mutex_unlock(&subState->mutex);
  return false;
  }
- tscDebug("0x%"PRIx64" subquery:%p,%d state set to 1", pParentSql->self, pSql, idx);
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64",%d state set to 1", pParentSql->self, pSql->self, idx);
  subState->states[idx] = 1;
@@ -170,7 +172,8 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
  return 0;
  }
- tscDebug("0x%"PRIx64" sub:%p table idx:%d, input group number:%d", pSql->self, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" table idx:%d, input group number:%d", pSql->self,
+ pSql->pSubs[i]->self, i, pSupporter->pTSBuf->numOfGroups);
  ctxlist[i].p = pSupporter;
  ctxlist[i].res = output;
@@ -376,9 +379,9 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) {
  TSKEY et = taosGetTimestampUs();
  for (int32_t i = 0; i < joinNum; ++i) {
- tscDebug("0x%"PRIx64" sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks "
  "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us",
- pSql->self, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
+ pSql->self, pSql->pSubs[i]->self, i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey,
  tsBufGetNumOfGroup(ctxlist[i].res), et - st);
  }
@@ -655,7 +658,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
  //prepare the subqueries object failed, abort
  if (!success) {
  pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql,
+ tscError("0x%"PRIx64" failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql->self,
  pSql->subState.numOfSub, pSql->res.code);
  freeJoinSubqueryObj(pSql);
@@ -700,7 +703,7 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
  static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
  if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
- tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
+ tscError("0x%"PRIx64" all subquery return and query failed, global code:%s", pSqlObj->self, tstrerror(pSqlObj->res.code));
  freeJoinSubqueryObj(pSqlObj);
  return 0;
  }
@@ -784,7 +787,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
  STableIdInfo item = {.uid = tt->uid, .tid = tt->tid, .key = INT64_MIN};
  taosArrayPush(vgTables, &item);
- tscTrace("%p tid:%d, uid:%"PRIu64",vgId:%d added", pSql, tt->tid, tt->uid, tt->vgId);
+ tscTrace("0x%"PRIx64" tid:%d, uid:%"PRIu64",vgId:%d added", pSql->self, tt->tid, tt->uid, tt->vgId);
  prev = tt;
  }
@@ -850,9 +853,9 @@ static void issueTsCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
  size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
  tscDebug(
- "%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
+ "0x%"PRIx64" subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
  "numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
- pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
+ pParent->self, pSql->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
  tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
  tscBuildAndSendRequest(pSql, NULL);
@@ -865,7 +868,7 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq
  assert(prev->vgId >= 1 && p->vgId >= 1);
  if (doCompare(prev->tag, p->tag, pColSchema->type, pColSchema->bytes) == 0) {
- tscError("%p join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj);
+ tscError("0x%"PRIx64" join tags have same value for different table, free all sub SqlObj and quit", pPSqlObj->self);
  pPSqlObj->res.code = TSDB_CODE_QRY_DUP_JOIN_KEY;
  return false;
  }
@@ -1101,7 +1104,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
  if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
  return;
  }
@@ -1116,7 +1119,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  // todo retry if other subqueries are not failed
  assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
  pParentSql->res.code = numOfRows;
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1135,7 +1138,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  // todo handle memory error
  char* tmp = realloc(pSupporter->pIdTagList, length);
  if (tmp == NULL) {
- tscError("%p failed to malloc memory", pSql);
+ tscError("0x%"PRIx64" failed to malloc memory", pSql->self);
  pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1255,7 +1258,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
  if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)){
  return;
  }
@@ -1269,7 +1272,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
  // todo retry if other subqueries are not failed yet
  assert(numOfRows < 0 && numOfRows == taos_errno(pSql));
- tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
+ tscError("0x%"PRIx64" sub query failed, code:%s, index:%d", pSql->self, tstrerror(numOfRows), pSupporter->subqueryIndex);
  pParentSql->res.code = numOfRows;
  if (quitAllSubquery(pSql, pParentSql, pSupporter)){
@@ -1285,7 +1288,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  pSupporter->f = fopen(pSupporter->path, "wb");
  if (pSupporter->f == NULL) {
- tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
+ tscError("0x%"PRIx64" failed to create tmp file:%s, reason:%s", pSql->self, pSupporter->path, strerror(errno));
  pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
@@ -1305,7 +1308,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
  STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
  if (pBuf == NULL) {  // in error process, close the fd
- tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
+ tscError("0x%"PRIx64" invalid ts comp file from vnode, abort subquery, file size:%d", pSql->self, numOfRows);
  pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)){
@@ -1402,7 +1405,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
  SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
  if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, numOfRows, pParentSql->res.code);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
  return;
  }
@@ -1417,7 +1420,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
  assert(numOfRows == taos_errno(pSql));
  pParentSql->res.code = numOfRows;
- tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows));
+ tscError("0x%"PRIx64" retrieve failed, index:%d, code:%s", pSql->self, pSupporter->subqueryIndex, tstrerror(numOfRows));
  tscAsyncResultOnError(pParentSql);
  return;
@@ -1453,7 +1456,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
  }
  if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pState->numOfSub);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub);
  return;
  }
@@ -1475,16 +1478,16 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
  SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
  if (pRes1->row > 0 && pRes1->numOfRows > 0) {
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
  assert(pRes1->row < pRes1->numOfRows);
  } else {
  if (!stableQuery) {
  pRes1->numOfClauseTotal += pRes1->numOfRows;
  }
- tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64, pParentSql->self, pParentSql->pSubs[i], i,
- pRes1->numOfRows, pRes1->numOfTotal);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64, pParentSql->self,
+ pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
  }
  }
@@ -1665,7 +1668,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
  if (pRes1->row >= pRes1->numOfRows) {
- tscDebug("0x%"PRIx64" subquery:%p retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1,
+ tscDebug("0x%"PRIx64" subquery:0x%"PRIx64" retrieve data from vnode, subquery:%d, vgroupIndex:%d", pSql->self, pSql1->self,
  pSupporter->subqueryIndex, pTableMetaInfo->vgroupIndex);
  tscResetForNextRetrieve(pRes1);
@@ -1745,7 +1748,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
  // retrieve actual query results from vnode during the second stage join subquery
  if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
- tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
+ tscError("0x%"PRIx64" abort query due to other subquery failure. code:%d, global code:%d", pSql->self, code, pParentSql->res.code);
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
  return;
  }
@@ -1759,7 +1762,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
  if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
  assert(taos_errno(pSql) == code);
- tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
+ tscError("0x%"PRIx64" abort query, code:%s, global code:%s", pSql->self, tstrerror(code), tstrerror(pParentSql->res.code));
  pParentSql->res.code = code;
  if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
@@ -1990,7 +1993,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
  for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
  SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
  if (pSupporter == NULL) {  // failed to create support struct, abort current query
- tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i);
+ tscError("0x%"PRIx64" tableIndex:%d, failed to allocate join support object, abort further query", pSql->self, i);
  code = TSDB_CODE_TSC_OUT_OF_MEMORY;
  goto _error;
  }
@@ -2386,9 +2389,9 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
  tscTansformFuncForSTableQuery(pNewQueryInfo);
  tscDebug(
- "%p first round subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
+ "0x%"PRIx64" first round subquery:0x%"PRIx64" tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, query to retrieve timestamps, "
  "numOfExpr:%" PRIzu ", colList:%d, numOfOutputFields:%d, name:%s",
- pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
+ pSql->self, pNew->self, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pNewQueryInfo->type,
  tscSqlExprNumOfExprs(pNewQueryInfo), index+1, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
  tscHandleMasterSTableQuery(pNew);
@@ -2474,7 +2477,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
  for (; i < pState->numOfSub; ++i) {
  SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
  if (trs == NULL) {
- tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
  break;
  }
@@ -2483,7 +2486,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
  trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
  if (trs->localBuffer == NULL) {
- tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
  tfree(trs);
  break;
  }
@@ -2495,7 +2498,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
  SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
  if (pNew == NULL) {
- tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno));
  tfree(trs->localBuffer);
  tfree(trs);
  break;
@@ -2557,7 +2560,7 @@ static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, i
  static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES *tres, int32_t code) {
  // set no disk space error info
- tscError("sub:%p failed to flush data to disk, reason:%s", tres, tstrerror(code));
+ tscError("sub:0x%"PRIx64" failed to flush data to disk, reason:%s", ((SSqlObj *)tres)->self, tstrerror(code));
  SSqlObj* pParentSql = trsupport->pParentSql;
  pParentSql->res.code = code;
@@ -2582,7 +2585,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
  const uint32_t nBufferSize = (1u << 16u);  // 64KB
  trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
  if (trsupport->localBuffer == NULL) {
- tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
+ tscError("0x%"PRIx64" failed to malloc buffer for local buffer, reason:%s", pSql->self, strerror(errno));
  tfree(trsupport);
  return TSDB_CODE_TSC_OUT_OF_MEMORY;
  }
@@ -2597,13 +2600,13 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32
  // clear local saved number of results
  trsupport->localBuffer->num = 0;
- tscError("%p sub:%p retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve/query failed, code:%s, orderOfSub:%d, retry:%d", trsupport->pParentSql->self, pSql->self,
  tstrerror(code), subqueryIndex, trsupport->numOfRetry);
  SSqlObj *pNew = tscCreateSTableSubquery(trsupport->pParentSql, trsupport, pSql);
  if (pNew == NULL) {
- tscError("%p sub:%p failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
- oriTrs->pParentSql, pSql, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" failed to create new subquery due to error:%s, abort retry, vgId:%d, orderOfSub:%d",
+ oriTrs->pParentSql->self, pSql->self, tstrerror(terrno), pVgroup->vgId, oriTrs->subqueryIndex);
  pParentSql->res.code = terrno;
  oriTrs->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
@@ -2657,7 +2660,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
  if (numOfRows >= 0) {  // current query is successful, but other sub query failed, still abort current query.
  tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d,orderOfSub:%d", pParentSql->self, pSql->self, numOfRows, subqueryIndex);
- tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" abort further retrieval due to other queries failure,orderOfSub:%d,code:%s", pParentSql->self, pSql->self,
  subqueryIndex, tstrerror(pParentSql->res.code));
  } else {
  if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pParentSql->res.code == TSDB_CODE_SUCCESS) {
@@ -2669,20 +2672,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
  }
  } else {  // reach the maximum retry count, abort
  atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
- tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql, pSql,
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self,
  tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
  }
  }
  if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
- tscDebug("0x%"PRIx64" sub:%p,%d freed, not finished, total:%d", pParentSql->self, pSql, trsupport->subqueryIndex, pState->numOfSub);
+ tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d freed, not finished, total:%d", pParentSql->self,
+ pSql->self, trsupport->subqueryIndex, pState->numOfSub);
  tscFreeRetrieveSup(pSql);
  return;
  }
  // all subqueries are failed
- tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
+ tscError("0x%"PRIx64" retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql->self, pState->numOfSub,
  tstrerror(pParentSql->res.code));
  // release allocated resource
@@ -2730,8 +2734,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
  #endif
  if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
- tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query",
- pParentSql->self, pSql->self, tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
+ tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
  tscAbortFurtherRetryRetrieval(trsupport, pSql, TSDB_CODE_TSC_NO_DISKSPACE);
  return;
  }
@@ -2819,7 +2823,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
  }
  if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
- tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(numOfRows), trsupport->numOfRetry);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(numOfRows), trsupport->numOfRetry);
  int32_t sent = 0;
@@ -2847,8 +2851,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
  pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
  if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
- tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
- pParentSql, pSql, tsMaxNumOfOrderedResults, num);
+ tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
+ pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY); tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
return; return;
} }
@ -2863,7 +2867,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
// no disk space for tmp directory // no disk space for tmp directory
if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) { if (tsTotalTmpDirGB != 0 && tsAvailTmpDirectorySpace < tsReservedTmpDirectorySpace) {
tscError("%p sub:%p client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql, pSql, tscError("0x%"PRIx64" sub:0x%"PRIx64" client disk space remain %.3f GB, need at least %.3f GB, stop query", pParentSql->self, pSql->self,
tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace); tsAvailTmpDirectorySpace, tsReservedTmpDirectorySpace);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE); tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_NO_DISKSPACE);
return; return;
@ -2933,8 +2937,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
// stable query killed or other subquery failed, all query stopped // stable query killed or other subquery failed, all query stopped
if (pParentSql->res.code != TSDB_CODE_SUCCESS) { if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
tscError("%p query cancelled or failed, sub:%p, vgId:%d, orderOfSub:%d, code:%s, global code:%s", tscError("0x%"PRIx64" query cancelled or failed, sub:0x%"PRIx64", vgId:%d, orderOfSub:%d, code:%s, global code:%s",
pParentSql, pSql, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code)); pParentSql->self, pSql->self, pVgroup->vgId, trsupport->subqueryIndex, tstrerror(code), tstrerror(pParentSql->res.code));
tscHandleSubqueryError(param, tres, code); tscHandleSubqueryError(param, tres, code);
return; return;
@ -2951,7 +2955,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
assert(code == taos_errno(pSql)); assert(code == taos_errno(pSql));
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY) {
tscError("%p sub:%p failed code:%s, retry:%d", pParentSql, pSql, tstrerror(code), trsupport->numOfRetry); tscError("0x%"PRIx64" sub:0x%"PRIx64" failed code:%s, retry:%d", pParentSql->self, pSql->self, tstrerror(code), trsupport->numOfRetry);
int32_t sent = 0; int32_t sent = 0;
@ -2960,7 +2964,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
return; return;
} }
} else { } else {
tscError("%p sub:%p reach the max retry times, set global code:%s", pParentSql, pSql, tstrerror(code)); tscError("0x%"PRIx64" sub:0x%"PRIx64" reach the max retry times, set global code:%s", pParentSql->self, pSql->self, tstrerror(code));
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, code); // set global code and abort
} }
@ -2980,7 +2984,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) { static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) {
if (pParentObj->retry > pParentObj->maxRetry) { if (pParentObj->retry > pParentObj->maxRetry) {
tscError("%p max retry reached, abort the retry effort", pParentObj); tscError("0x%"PRIx64" max retry reached, abort the retry effort", pParentObj->self);
return false; return false;
} }
@ -3072,7 +3076,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
} }
} }
tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj, tscError("0x%"PRIx64" Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj->self,
pParentObj->res.numOfRows, numOfFailed, numOfSub); pParentObj->res.numOfRows, numOfFailed, numOfSub);
tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable", pParentObj->self, pParentObj->cmd.numOfTables); tscDebug("0x%"PRIx64" cleanup %d tableMeta in hashTable", pParentObj->self, pParentObj->cmd.numOfTables);
@ -3141,7 +3145,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
pSup->pSql = pSql; pSup->pSql = pSql;
pSub->param = pSup; pSub->param = pSup;
tscDebug("0x%"PRIx64" sub:%p launch sub insert, orderOfSub:%d", pSql->self, pSub, i); tscDebug("0x%"PRIx64" sub:0x%"PRIx64" launch sub insert, orderOfSub:%d", pSql->self, pSub->self, i);
if (pSub->res.code != TSDB_CODE_SUCCESS) { if (pSub->res.code != TSDB_CODE_SUCCESS) {
tscHandleInsertRetry(pSql, pSub); tscHandleInsertRetry(pSql, pSub);
} }
@ -3189,7 +3193,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT); SSqlObj *pNew = createSimpleSubObj(pSql, multiVnodeInsertFinalize, pSupporter, TSDB_SQL_INSERT);
if (pNew == NULL) { if (pNew == NULL) {
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, numOfSub, strerror(errno)); tscError("0x%"PRIx64" failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql->self, numOfSub, strerror(errno));
goto _error; goto _error;
} }
@ -3213,7 +3217,7 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
} }
if (numOfSub < pSql->subState.numOfSub) { if (numOfSub < pSql->subState.numOfSub) {
tscError("%p failed to prepare subObj structure and launch sub-insertion", pSql); tscError("0x%"PRIx64" failed to prepare subObj structure and launch sub-insertion", pSql->self);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY; pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error; goto _error;
} }

View File

@ -1262,7 +1262,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) { if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pSql->self, ret);
taosHashCleanup(pVnodeDataBlockHashList); taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList); tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret; return ret;
@ -1281,7 +1281,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
dataBuf->pData = tmp; dataBuf->pData = tmp;
memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size);
} else { // failed to allocate memory, free already allocated memory and return error code } else { // failed to allocate memory, free already allocated memory and return error code
tscError("%p failed to allocate memory for merging submit block, size:%d", pSql, dataBuf->nAllocSize); tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pSql->self, dataBuf->nAllocSize);
taosHashCleanup(pVnodeDataBlockHashList); taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList); tscDestroyBlockArrayList(pVnodeDataBlockList);
@ -2489,7 +2489,7 @@ void registerSqlObj(SSqlObj* pSql) {
SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) { SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, int32_t cmd) {
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) { if (pNew == NULL) {
tscError("%p new subquery failed, tableIndex:%d", pSql, 0); tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, 0);
return NULL; return NULL;
} }
@ -2503,7 +2503,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData); int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0); tscError("0x%"PRIx64" new subquery failed, unable to malloc tag data, tableIndex:%d", pSql->self, 0);
free(pNew); free(pNew);
return NULL; return NULL;
} }
@ -2579,7 +2579,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) { if (pNew == NULL) {
tscError("%p new subquery failed, tableIndex:%d", pSql, tableIndex); tscError("0x%"PRIx64" new subquery failed, tableIndex:%d", pSql->self, tableIndex);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return NULL; return NULL;
} }
@ -2670,7 +2670,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
} }
if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { if (tscAllocPayload(pnCmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex); tscError("0x%"PRIx64" new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql->self, tableIndex, pTableMetaInfo->vgroupIndex);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error; goto _error;
} }
@ -2721,7 +2721,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
// this case cannot be happened // this case cannot be happened
if (pFinalInfo->pTableMeta == NULL) { if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name)); tscError("0x%"PRIx64" new subquery failed since no tableMeta, name:%s", pSql->self, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS); assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);

View File

@ -139,7 +139,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1; int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0; int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4; int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1; int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1; int8_t tsEnableSlaveQuery = 1;

View File

@ -81,7 +81,7 @@ enum QUERY_MODE {
#define MAX_DB_NAME_SIZE 64 #define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64 #define MAX_TB_NAME_SIZE 64
#define MAX_DATA_SIZE 16000 #define MAX_DATA_SIZE (16*1024)
#define MAX_NUM_DATATYPE 10 #define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* abort */ #define OPT_ABORT 1 /* abort */
#define STRING_LEN 60000 #define STRING_LEN 60000
@ -252,8 +252,8 @@ typedef struct SSuperTable_S {
int maxSqlLen; // int maxSqlLen; //
int insertInterval; // insert interval, will override global insert interval int insertInterval; // insert interval, will override global insert interval
int64_t insertRows; // 0: no limit int64_t insertRows;
int timeStampStep; int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE]; char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1]; char sampleFile[MAX_FILE_NAME_LEN+1];
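The struct change above widens timeStampStep from int to int64_t (the printf formats below follow suit), presumably so that multiplying the step by a large row index can no longer overflow 32-bit arithmetic. A minimal standalone sketch of the failure mode, with assumed example values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t startTime = 1601510400000LL; /* assumed ms-precision start: 2020-10-01 00:00:00.000 */
  int     rows      = 3000000;         /* hypothetical large row index */
  int32_t step32    = 1000;            /* a 1 s step held in 32 bits, as before the change */

  int64_t product64 = (int64_t)step32 * rows; /* 3,000,000,000 -- exact */
  int32_t product32 = (int32_t)product64;     /* truncated to 32 bits: -1,294,967,296 */

  printf("timestamp with 32-bit step math: %" PRId64 "\n", startTime + product32);
  printf("timestamp with 64-bit step math: %" PRId64 "\n", startTime + product64);
  return 0;
}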
@ -1369,7 +1369,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].disorderRatio); g_Dbs.db[i].superTbls[j].disorderRatio);
printf(" maxSqlLen: \033[33m%d\033[0m\n", printf(" maxSqlLen: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].maxSqlLen); g_Dbs.db[i].superTbls[j].maxSqlLen);
printf(" timeStampStep: \033[33m%d\033[0m\n", printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].timeStampStep); g_Dbs.db[i].superTbls[j].timeStampStep);
printf(" startTimestamp: \033[33m%s\033[0m\n", printf(" startTimestamp: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].startTimestamp); g_Dbs.db[i].superTbls[j].startTimestamp);
@ -1541,7 +1541,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen);
fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); fprintf(fp, " timeStampStep: %"PRId64"\n", g_Dbs.db[i].superTbls[j].timeStampStep);
fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp);
fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat);
fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile);
@ -3657,7 +3657,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER; goto PARSE_OVER;
} }
/*
cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
@ -3667,7 +3666,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER; goto PARSE_OVER;
} }
*/
cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
if (childTblExists if (childTblExists
@ -4648,7 +4646,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq)
static int generateDataTail( static int generateDataTail(
SSuperTable* superTblInfo, SSuperTable* superTblInfo,
int batch, char* buffer, int remainderBufLen, int64_t insertRows, int batch, char* buffer, int remainderBufLen, int64_t insertRows,
int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int64_t startFrom, int64_t startTime, int *pSamplePos, int *dataLen) {
int len = 0; int len = 0;
int ncols_per_record = 1; // count first col ts int ncols_per_record = 1; // count first col ts
@ -4868,6 +4866,8 @@ static int generateInterlaceDataBuffer(
pstr += dataLen; pstr += dataLen;
*pRemainderBufLen -= dataLen; *pRemainderBufLen -= dataLen;
} else { } else {
debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %d\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen; pstr -= headLen;
pstr[0] = '\0'; pstr[0] = '\0';
k = 0; k = 0;
@ -4925,10 +4925,24 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n", debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__); pThreadInfo->threadID, __func__, __LINE__);
int64_t insertRows;
int interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo; SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; if (superTblInfo) {
int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; insertRows = superTblInfo->insertRows;
if ((superTblInfo->interlaceRows == 0)
&& (g_args.interlace_rows > 0)) {
interlaceRows = g_args.interlace_rows;
} else {
interlaceRows = superTblInfo->interlaceRows;
}
} else {
insertRows = g_args.num_of_DPT;
interlaceRows = g_args.interlace_rows;
}
if (interlaceRows > insertRows) if (interlaceRows > insertRows)
interlaceRows = insertRows; interlaceRows = insertRows;
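The block above replaces the one-line ternary with an explicit precedence rule: a per-super-table interlace_rows of 0 means "not set", so the global command-line value is used instead, and the result is still clamped to insertRows. The same rule is repeated in syncWrite() further down; a hypothetical helper that both call sites could share (names are assumptions, not the actual taosdemo API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: a per-super-table value of 0 falls back to the global
 * setting; the result never exceeds the number of rows to insert. */
static int64_t effectiveInterlaceRows(int stbInterlaceRows, int globalInterlaceRows,
                                      int64_t insertRows) {
  int64_t rows = (stbInterlaceRows == 0) ? globalInterlaceRows : stbInterlaceRows;
  return (rows > insertRows) ? insertRows : rows;
}

int main(void) {
  printf("%lld\n", (long long)effectiveInterlaceRows(0, 10, 5));    /* 5: global wins, then clamped */
  printf("%lld\n", (long long)effectiveInterlaceRows(20, 10, 100)); /* 20: per-table value wins     */
  return 0;
}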
@ -4960,7 +4974,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalInsertRows = 0; pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0; pThreadInfo->totalAffectedRows = 0;
int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
int insert_interval = int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
@ -5063,15 +5077,15 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if (generatedRecPerTbl >= insertRows) if (generatedRecPerTbl >= insertRows)
break; break;
int remainRows = insertRows - generatedRecPerTbl;
if ((remainRows > 0) && (batchPerTbl > remainRows))
batchPerTbl = remainRows;
if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
break; break;
} }
} }
int remainRows = insertRows - generatedRecPerTbl;
if ((remainRows > 0) && (batchPerTbl > remainRows))
batchPerTbl = remainRows;
verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n", verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->threadID, __func__, __LINE__,
generatedRecPerTbl, insertRows); generatedRecPerTbl, insertRows);
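Relocating the remainRows clamp out of the inner condition appears to make the per-table batch get trimmed to the rows still owed on every pass, not only when the early-break branch is evaluated. The clamp itself, as a standalone sketch under that assumption:

#include <stdint.h>

/* Sketch: never generate more rows for a table than it still needs. */
static int clampBatchToRemaining(int batchPerTbl, int64_t insertRows,
                                 int64_t generatedRecPerTbl) {
  int64_t remainRows = insertRows - generatedRecPerTbl;
  if (remainRows > 0 && batchPerTbl > remainRows) {
    batchPerTbl = (int)remainRows;
  }
  return batchPerTbl;
}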
@ -5169,7 +5183,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
int64_t startTs = taosGetTimestampMs(); int64_t startTs = taosGetTimestampMs();
int64_t endTs; int64_t endTs;
int timeStampStep = int64_t timeStampStep =
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
/* int insert_interval = /* int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
@ -5294,7 +5308,18 @@ static void* syncWrite(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg; threadInfo *pThreadInfo = (threadInfo *)sarg;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo; SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; int interlaceRows;
if (superTblInfo) {
if ((superTblInfo->interlaceRows == 0)
&& (g_args.interlace_rows > 0)) {
interlaceRows = g_args.interlace_rows;
} else {
interlaceRows = superTblInfo->interlaceRows;
}
} else {
interlaceRows = g_args.interlace_rows;
}
if (interlaceRows > 0) { if (interlaceRows > 0) {
// interlace mode // interlace mode
@ -5993,9 +6018,9 @@ static void *specifiedTableQuery(void *sarg) {
pThreadInfo->threadID, pThreadInfo->threadID,
totalQueried, totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0))); (double)(totalQueried/((endTs-startTs)/1000.0)));
}
lastPrintTime = currentPrintTime; lastPrintTime = currentPrintTime;
} }
}
return NULL; return NULL;
} }
@ -6079,10 +6104,10 @@ static void *superTableQuery(void *sarg) {
pThreadInfo->threadID, pThreadInfo->threadID,
totalQueried, totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0))); (double)(totalQueried/((endTs-startTs)/1000.0)));
}
lastPrintTime = currentPrintTime; lastPrintTime = currentPrintTime;
} }
} }
}
et = taosGetTimestampMs(); et = taosGetTimestampMs();
printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(), taosGetSelfPthreadId(),
@ -6424,7 +6449,7 @@ static void *specifiedSubscribe(void *sarg) {
} }
tsub[i] = subscribeImpl(pThreadInfo->taos, tsub[i] = subscribeImpl(pThreadInfo->taos,
g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile);
if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { if (NULL == tsub[i]) {
taos_close(pThreadInfo->taos); taos_close(pThreadInfo->taos);
return NULL; return NULL;
} }

View File

@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS {
int32_t mnodeInitDbs(); int32_t mnodeInitDbs();
void mnodeCleanupDbs(); void mnodeCleanupDbs();
int64_t mnodeGetDbNum(); int64_t mnodeGetDbNum();
int32_t mnodeGetDbMaxReplica();
SDbObj *mnodeGetDb(char *db); SDbObj *mnodeGetDb(char *db);
SDbObj *mnodeGetDbByTableName(char *db); SDbObj *mnodeGetDbByTableName(char *db);
void * mnodeGetNextDb(void *pIter, SDbObj **pDb); void * mnodeGetNextDb(void *pIter, SDbObj **pDb);

View File

@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() {
return sdbGetNumOfRows(tsDbSdb); return sdbGetNumOfRows(tsDbSdb);
} }
int32_t mnodeGetDbMaxReplica() {
int32_t maxReplica = 0;
SDbObj *pDb = NULL;
void *pIter = NULL;
while (1) {
pIter = mnodeGetNextDb(pIter, &pDb);
if (pDb == NULL) break;
if (pDb->cfg.replications > maxReplica)
maxReplica = pDb->cfg.replications;
mnodeDecDbRef(pDb);
}
return maxReplica;
}
static int32_t mnodeDbActionInsert(SSdbRow *pRow) { static int32_t mnodeDbActionInsert(SSdbRow *pRow) {
SDbObj *pDb = pRow->pObj; SDbObj *pDb = pRow->pObj;
SAcctObj *pAcct = mnodeGetAcct(pDb->acct); SAcctObj *pAcct = mnodeGetAcct(pDb->acct);

View File

@ -29,6 +29,7 @@
#include "mnodeDef.h" #include "mnodeDef.h"
#include "mnodeInt.h" #include "mnodeInt.h"
#include "mnodeDnode.h" #include "mnodeDnode.h"
#include "mnodeDb.h"
#include "mnodeMnode.h" #include "mnodeMnode.h"
#include "mnodeSdb.h" #include "mnodeSdb.h"
#include "mnodeShow.h" #include "mnodeShow.h"
@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
return TSDB_CODE_MND_NO_REMOVE_MASTER; return TSDB_CODE_MND_NO_REMOVE_MASTER;
} }
int32_t maxReplica = mnodeGetDbMaxReplica();
int32_t dnodesNum = mnodeGetDnodesNum();
if (dnodesNum <= maxReplica) {
mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replia: %d", pDnode->dnodeId, ep, dnodesNum, maxReplica);
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
mInfo("dnode:%d, start to drop it", pDnode->dnodeId); mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
int32_t code = bnDropDnode(pDnode); int32_t code = bnDropDnode(pDnode);
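The new check above refuses the drop whenever the current dnode count does not exceed the largest replica factor of any database: with three dnodes and a database created with replica 3, dnodesNum (3) <= maxReplica (3), so the request fails with TSDB_CODE_MND_NO_ENOUGH_DNODES. A minimal standalone sketch of the predicate (maxReplica itself comes from the mnodeGetDbMaxReplica() iteration added in the previous file):

#include <stdbool.h>
#include <stdio.h>

/* Sketch: dropping a dnode is only allowed while strictly more dnodes
 * exist than the highest replica factor currently in use. */
static bool canDropDnode(int dnodesNum, int maxReplica) {
  return dnodesNum > maxReplica;
}

int main(void) {
  printf("%d\n", canDropDnode(3, 3)); /* 0: rejected, a replica-3 db would lose a copy */
  printf("%d\n", canDropDnode(4, 3)); /* 1: allowed */
  return 0;
}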

View File

@ -347,9 +347,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite, vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite,
pWrite->processedCount); pWrite->processedCount);
pWrite->processedCount = 0; pWrite->processedCount = 0;
void *handle = pWrite->rpcMsg.handle;
code = vnodeWriteToWQueueImp(pWrite); code = vnodeWriteToWQueueImp(pWrite);
if (code != 0) { if (code != TSDB_CODE_SUCCESS) {
dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code); SRpcMsg rpcRsp = {.handle = handle, .code = code};
rpcSendResponse(&rpcRsp);
} }
} }
} }
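The flow-control retry path now captures pWrite->rpcMsg.handle before calling vnodeWriteToWQueueImp(), presumably because a failed enqueue can release pWrite, so the error is answered directly over the saved RPC handle instead of going through dnodeSendRpcVWriteRsp() with a possibly freed request. A generic illustration of that save-before-call pattern, with purely hypothetical names:

#include <stdio.h>
#include <stdlib.h>

typedef struct Request { void *handle; } Request;

/* Hypothetical enqueue that releases the request on failure, mirroring the hazard. */
static int enqueue(Request *req) {
  free(req);  /* simulate the failure path that frees the request */
  return -1;  /* non-zero: caller must send an error response     */
}

static void sendErrorResponse(void *handle, int code) {
  printf("respond on handle %p with code %d\n", handle, code);
}

int main(void) {
  Request *req = malloc(sizeof(*req));
  req->handle = (void *)0x1234;

  void *handle = req->handle; /* saved before the call that may free req */
  int code = enqueue(req);
  if (code != 0) {
    sendErrorResponse(handle, code); /* safe: the freed request is never touched */
  }
  return 0;
}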

View File

@ -0,0 +1,147 @@
#!/bin/bash
WORK_DIR=/home/ubuntu/pxiao
TDENGINE_DIR=/home/ubuntu/pxiao/TDengine
NUM_OF_VERSIONS=5
CURRENT_VERSION=0
today=`date +"%Y%m%d"`
TAOSDEMO_COMPARE_TEST_REPORT=$TDENGINE_DIR/tests/taosdemo-compare-test-report-$today.log
# Coloured Echoes
function red_echo { echo -e "\033[31m$@\033[0m"; }
function green_echo { echo -e "\033[32m$@\033[0m"; }
function yellow_echo { echo -e "\033[33m$@\033[0m"; }
function white_echo { echo -e "\033[1;37m$@\033[0m"; }
# Coloured Printfs
function red_printf { printf "\033[31m$@\033[0m"; }
function green_printf { printf "\033[32m$@\033[0m"; }
function yellow_printf { printf "\033[33m$@\033[0m"; }
function white_printf { printf "\033[1;37m$@\033[0m"; }
# Debugging Outputs
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; }
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; }
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; }
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; }
function getCurrentVersion {
echoInfo "Build TDengine"
cd $WORK_DIR/TDengine
git remote update > /dev/null
git reset --hard HEAD
git checkout master
REMOTE_COMMIT=`git rev-parse --short remotes/origin/master`
LOCAL_COMMIT=`git rev-parse --short @`
echo " LOCAL: $LOCAL_COMMIT"
echo "REMOTE: $REMOTE_COMMIT"
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "repo up-to-date"
else
echo "repo need to pull"
git pull > /dev/null 2>&1
fi
cd debug
rm -rf *
cmake .. > /dev/null 2>&1
make > /dev/null 2>&1
make install > /dev/null 2>&1
rm -rf $WORK_DIR/taosdemo
cp -r $TDENGINE_DIR/src/kit/taosdemo $WORK_DIR
CURRENT_VERSION=`taosd -V | grep version | awk '{print $3}' | awk -F. '{print $3}'`
}
function buildTDengineByVersion() {
echoInfo "build TDengine on branch: $1"
git reset --hard HEAD
git checkout $1
git pull > /dev/null
rm -rf $TDENGINE_DIR/src/kit/taosdemo
cp -r $WORK_DIR/taosdemo $TDENGINE_DIR/src/kit
cd $TDENGINE_DIR/debug
rm -rf *
cmake .. > /dev/null 2>&1
make > /dev/null 2>&1
make install > /dev/null 2>&1
}
function stopTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
function startTaosd {
echo "Start taosd"
rm -rf /var/lib/perf/*
rm -rf /var/log/perf/*
nohup taosd -c /etc/perf/ > /dev/null 2>&1 &
sleep 10
}
function runTaosdemoCompare {
echoInfo "Stop Taosd"
stopTaosd
getCurrentVersion
release="master"
[ -f $TAOSDEMO_COMPARE_TEST_REPORT ] && rm $TAOSDEMO_COMPARE_TEST_REPORT
for((i=0;i<$NUM_OF_VERSIONS;i++))
do
startTaosd
taos -s "drop database if exists demodb;"
taosdemo -y -d demodb > taosdemoperf.txt
echo "==================== taosdemo performance for $release ====================" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
CREATE_TABLE_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'`
INSERT_RECORDS_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'`
RECORDS_PER_SECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'`
AVG_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $4}' | awk -Fm '{print $1}'`
MAX_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $6}' | awk -Fm '{print $1}'`
MIN_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $8}' | awk -Fm '{print $1}'`
echo "create table time: $CREATE_TABLE_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
echo "insert records time: $INSERT_RECORDS_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
echo "records per second: $RECORDS_PER_SECOND records/second" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
echo "avg delay: $AVG_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
echo "max delay: $MAX_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
echo "min delay: $MIN_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
[ -f taosdemoperf.txt ] && rm taosdemoperf.txt
stopTaosd
version=`expr $CURRENT_VERSION - $i`
release="release/s1$version"
buildTDengineByVersion $release
done
}
function sendReport {
echo "send report"
receiver="develop@taosdata.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
cd $TDENGINE_DIR
sed -i 's/\x1b\[[0-9;]*m//g' $TAOSDEMO_COMPARE_TEST_REPORT
BODY_CONTENT=`cat $TAOSDEMO_COMPARE_TEST_REPORT`
echo -e "to: ${receiver}\nsubject: taosdemo performance compare test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
(cat - && uuencode $TAOSDEMO_COMPARE_TEST_REPORT taosdemo-compare-test-report-$today.log) | \
ssmtp "${receiver}" && echo "Report Sent!"
}
runTaosdemoCompare
sendReport
echoInfo "End of Taosdemo Compare Test" | tee -a $WORK_DIR/cron.log

View File

@ -0,0 +1,55 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
class TwoClients:
def initConnection(self):
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
self.config = "/home/chr/taosdata/TDengine/sim/dnode1/cfg "
def newCloseCon(self, times):
newConList = []
for i in range(0, times):
newConList.append(taos.connect(self.host, self.user, self.password, self.config))
for i in range(0, times):
newConList[i].close()
def run(self):
tdDnodes.init("")
tdDnodes.setTestCluster(False)
tdDnodes.setValgrind(False)
tdDnodes.stopAll()
tdDnodes.deploy(1)
tdDnodes.start(1)
# multiple new and close connections
for m in range(1,101) :
t = threading.Thread(target=self.newCloseCon, args=(10,))
t.start()
clients = TwoClients()
clients.initConnection()
clients.run()

View File

@ -0,0 +1,9 @@
execute:
cd TDengine/tests/pytest && python3 ./test.py -f cluster/TD-3693/multClient.py && python3 cluster/TD-3693/multQuery.py
1. Use a three-node test cluster: fc1, fct2, fct4.
2. Use taosdemo to create two databases, db1 and db2, each with replica 1, and insert some data.
3. db1 sits on the mnode master (fct2); db2 sits on the mnode slave (fct4).
4. taosdemo was modified into a multi-threaded query tool (the modified build is named taosdemoMul); it continuously runs multi-threaded queries against the data in db2, opening multiple connections.
5. With the queries from step 4 running in the background, create tables, insert, and query on db2 again; repeat the query 10 times with a 91 s interval between rounds.
6. Then check the taosd log to see whether the earlier issue "send auth msg to mnodes" still appears.

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "192.168.1.104",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "192.168.1.104",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db2",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,74 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.rowNum = 100000
self.ts = 1537146000000
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert data to the cluster's db
os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath)
# multiple new and close connections with query data
os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath)
os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath)
# delete useless files
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf ./querySystemInfo*")
os.system("rm -rf cluster/TD-3693/multClient.py.sql")
os.system("rm -rf ./querySystemInfo*")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,72 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
class TwoClients:
def initConnection(self):
self.host = "fct4"
self.user = "root"
self.password = "taosdata"
self.config = "/etc/taos/"
self.rowNum = 10
self.ts = 1537146000000
def run(self):
# query data from the cluster's db
conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config)
cur = conn.cursor()
tdSql.init(cur, True)
tdSql.execute("use db2")
cur.execute("select count (tbname) from stb0")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 10)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 20)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 100000)
tdSql.query("select count(*) from stb01_0")
tdSql.checkData(0, 0, 20000)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 400000)
tdSql.execute("drop table if exists squerytest")
tdSql.execute("drop table if exists querytest")
tdSql.execute('''create stable squerytest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table querytest using squerytest tags('beijing')")
tdSql.execute("insert into querytest(ts) values(%d)" % (self.ts - 1))
for i in range(self.rowNum):
tdSql.execute("insert into querytest values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
for j in range(10):
tdSql.execute("use db2")
tdSql.query("select count(*),last(*) from querytest group by col1")
tdSql.checkRows(10)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 2, 2)
tdSql.checkData(1, 3, 1)
sleep(88)
tdSql.execute("drop table if exists squerytest")
tdSql.execute("drop table if exists querytest")
clients = TwoClients()
clients.initConnection()
clients.run()

View File

@ -0,0 +1,15 @@
{
"filetype":"query",
"cfgdir": "/etc/taos",
"host": "192.168.1.104",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db2",
"query_times": 1000000,
"specified_table_query":
{"query_interval":1, "concurrent":100,
"sqls": [{"sql": "select count(*) from db.stb0", "result": ""}]
}
}

View File

@ -151,6 +151,9 @@ python3 test.py -f tools/taosdemoTestTblAlt.py
python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py python3 test.py -f tools/taosdemoTestQuery.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
# update # update
python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update.py
@ -221,6 +224,7 @@ python3 ./test.py -f query/queryJoin10tables.py
python3 ./test.py -f query/queryStddevWithGroupby.py python3 ./test.py -f query/queryStddevWithGroupby.py
python3 ./test.py -f query/querySecondtscolumnTowherenow.py python3 ./test.py -f query/querySecondtscolumnTowherenow.py
python3 ./test.py -f query/queryFilterTswithDateUnit.py python3 ./test.py -f query/queryFilterTswithDateUnit.py
python3 ./test.py -f query/queryTscomputWithNow.py
@ -235,6 +239,8 @@ python3 ./test.py -f stream/history.py
python3 ./test.py -f stream/sys.py python3 ./test.py -f stream/sys.py
python3 ./test.py -f stream/table_1.py python3 ./test.py -f stream/table_1.py
python3 ./test.py -f stream/table_n.py python3 ./test.py -f stream/table_n.py
python3 ./test.py -f stream/showStreamExecTimeisNull.py
python3 ./test.py -f stream/cqSupportBefore1970.py
#alter table #alter table
python3 ./test.py -f alter/alter_table_crash.py python3 ./test.py -f alter/alter_table_crash.py
@ -280,6 +286,7 @@ python3 ./test.py -f functions/all_null_value.py
python3 ./test.py -f functions/function_avg.py -r 1 python3 ./test.py -f functions/function_avg.py -r 1
python3 ./test.py -f functions/function_bottom.py -r 1 python3 ./test.py -f functions/function_bottom.py -r 1
python3 ./test.py -f functions/function_count.py -r 1 python3 ./test.py -f functions/function_count.py -r 1
python3 ./test.py -f functions/function_count_last_stab.py
python3 ./test.py -f functions/function_diff.py -r 1 python3 ./test.py -f functions/function_diff.py -r 1
python3 ./test.py -f functions/function_first.py -r 1 python3 ./test.py -f functions/function_first.py -r 1
python3 ./test.py -f functions/function_last.py -r 1 python3 ./test.py -f functions/function_last.py -r 1

View File

@ -0,0 +1,70 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute('''create stable stest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute("create table test1 using stest tags('beijing')")
tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
# last verification
for i in range(self.rowNum):
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
tdSql.query("select count(*),last(*) from stest group by col1")
tdSql.checkRows(10)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 2, 2)
tdSql.checkData(1, 3, 1)
tdSql.query("select count(*),last(*) from stest group by col2")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 10)
tdSql.checkData(0, 2, 10)
tdSql.checkData(0, 3, 1)
tdSql.query("select count(*),last(ts,stest.*) from stest group by col1")
tdSql.checkRows(10)
tdSql.checkData(0, 0, 1)
tdSql.checkData(0, 2, "2018-09-17 09:00:00")
tdSql.checkData(1, 4, 1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -82,14 +82,14 @@ class TDTestCase:
self.ts = self.ts + self.rowNum + 10 self.ts = self.ts + self.rowNum + 10
tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 1 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 254, 65534, 4294967294, 18446744073709551614)" % ( self.ts + self.rowNum + 1 ))
tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 2 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 2 ))
tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % ( self.ts + self.rowNum + 3 )) tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % ( self.ts + self.rowNum + 3 ))
tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 4 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 4 ))
tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 5 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 5 ))
self.rowNum = self.rowNum + 5 self.rowNum = self.rowNum + 5
col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' ] col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' , '18446744073709551614' ]
op_list = [ '+' , '-' , '*' , '/' , '%' ] op_list = [ '+' , '-' , '*' , '/' , '%' ]
err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ] err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ]
order_lsit = [ ' order by ts ', ' order by ts desc ', ' order by ts asc '] order_lsit = [ ' order by ts ', ' order by ts desc ', ' order by ts asc ']

View File

@ -47,53 +47,53 @@ class TDTestCase:
for col in cols: for col in cols:
tdSql.error(f" select * from tts1 where {col} = 1d ") tdSql.error(f" select * from tts1 where {col} = 1d ")
tdSql.error(f" select * from tts1 where {col} < 1d ") tdSql.error(f" select * from tts1 where {col} < -1d ")
tdSql.error(f" select * from tts1 where {col} > 1d ") tdSql.error(f" select * from tts1 where {col} > 1d ")
tdSql.error(f" select * from tts1 where {col} >= 1d ") tdSql.error(f" select * from tts1 where {col} >= -1d ")
tdSql.error(f" select * from tts1 where {col} <= 1d ") tdSql.error(f" select * from tts1 where {col} <= 1d ")
tdSql.error(f" select * from tts1 where {col} <> 1d ") tdSql.error(f" select * from tts1 where {col} <> 1d ")
tdSql.error(f" select * from tts1 where {col} = 1m ") tdSql.error(f" select * from tts1 where {col} = -1m ")
tdSql.error(f" select * from tts1 where {col} < 1m ") tdSql.error(f" select * from tts1 where {col} < 1m ")
tdSql.error(f" select * from tts1 where {col} > 1m ") tdSql.error(f" select * from tts1 where {col} > 1m ")
tdSql.error(f" select * from tts1 where {col} >= 1m ") tdSql.error(f" select * from tts1 where {col} >= -1m ")
tdSql.error(f" select * from tts1 where {col} <= 1m ") tdSql.error(f" select * from tts1 where {col} <= 1m ")
tdSql.error(f" select * from tts1 where {col} <> 1m ") tdSql.error(f" select * from tts1 where {col} <> 1m ")
tdSql.error(f" select * from tts1 where {col} = 1s ") tdSql.error(f" select * from tts1 where {col} = -1s ")
tdSql.error(f" select * from tts1 where {col} < 1s ") tdSql.error(f" select * from tts1 where {col} < 1s ")
tdSql.error(f" select * from tts1 where {col} > 1s ") tdSql.error(f" select * from tts1 where {col} > 1s ")
tdSql.error(f" select * from tts1 where {col} >= 1s ") tdSql.error(f" select * from tts1 where {col} >= -1s ")
tdSql.error(f" select * from tts1 where {col} <= 1s ") tdSql.error(f" select * from tts1 where {col} <= 1s ")
tdSql.error(f" select * from tts1 where {col} <> 1s ") tdSql.error(f" select * from tts1 where {col} <> 1s ")
tdSql.error(f" select * from tts1 where {col} = 1a ") tdSql.error(f" select * from tts1 where {col} = -1a ")
tdSql.error(f" select * from tts1 where {col} < 1a ") tdSql.error(f" select * from tts1 where {col} < 1a ")
tdSql.error(f" select * from tts1 where {col} > 1a ") tdSql.error(f" select * from tts1 where {col} > 1a ")
tdSql.error(f" select * from tts1 where {col} >= 1a ") tdSql.error(f" select * from tts1 where {col} >= -1a ")
tdSql.error(f" select * from tts1 where {col} <= 1a ") tdSql.error(f" select * from tts1 where {col} <= 1a ")
tdSql.error(f" select * from tts1 where {col} <> 1a ") tdSql.error(f" select * from tts1 where {col} <> 1a ")
tdSql.error(f" select * from tts1 where {col} = 1h ") tdSql.error(f" select * from tts1 where {col} = -1h ")
tdSql.error(f" select * from tts1 where {col} < 1h ") tdSql.error(f" select * from tts1 where {col} < 1h ")
tdSql.error(f" select * from tts1 where {col} > 1h ") tdSql.error(f" select * from tts1 where {col} > 1h ")
tdSql.error(f" select * from tts1 where {col} >= 1h ") tdSql.error(f" select * from tts1 where {col} >= -1h ")
tdSql.error(f" select * from tts1 where {col} <= 1h ") tdSql.error(f" select * from tts1 where {col} <= 1h ")
tdSql.error(f" select * from tts1 where {col} <> 1h ") tdSql.error(f" select * from tts1 where {col} <> 1h ")
tdSql.error(f" select * from tts1 where {col} = 1w ") tdSql.error(f" select * from tts1 where {col} = -1w ")
tdSql.error(f" select * from tts1 where {col} < 1w ") tdSql.error(f" select * from tts1 where {col} < 1w ")
tdSql.error(f" select * from tts1 where {col} > 1w ") tdSql.error(f" select * from tts1 where {col} > 1w ")
tdSql.error(f" select * from tts1 where {col} >= 1w ") tdSql.error(f" select * from tts1 where {col} >= -1w ")
tdSql.error(f" select * from tts1 where {col} <= 1w ") tdSql.error(f" select * from tts1 where {col} <= 1w ")
tdSql.error(f" select * from tts1 where {col} <> 1w ") tdSql.error(f" select * from tts1 where {col} <> 1w ")
tdSql.error(f" select * from tts1 where {col} = 1u ") tdSql.error(f" select * from tts1 where {col} = -1u ")
tdSql.error(f" select * from tts1 where {col} < 1u ") tdSql.error(f" select * from tts1 where {col} < 1u ")
tdSql.error(f" select * from tts1 where {col} > 1u ") tdSql.error(f" select * from tts1 where {col} > 1u ")
tdSql.error(f" select * from tts1 where {col} >= 1u ") tdSql.error(f" select * from tts1 where {col} >= -1u ")
tdSql.error(f" select * from tts1 where {col} <= 1u ") tdSql.error(f" select * from tts1 where {col} <= 1u ")
tdSql.error(f" select * from tts1 where {col} <> 1u ") tdSql.error(f" select * from tts1 where {col} <> u ")
tdSql.error(f" select * from tts1 where {col} = 0d ") tdSql.error(f" select * from tts1 where {col} = 0d ")
tdSql.error(f" select * from tts1 where {col} < 0s ") tdSql.error(f" select * from tts1 where {col} < 0s ")
@ -125,6 +125,12 @@ class TDTestCase:
tdSql.error(f" select * from tts1 where {col} <> 0/1d ") tdSql.error(f" select * from tts1 where {col} <> 0/1d ")
tdSql.error(f" select * from tts1 where {col} <> 1w+'2010-01-01 00:00:00' ") tdSql.error(f" select * from tts1 where {col} <> 1w+'2010-01-01 00:00:00' ")
tdSql.error(f" select * from tts1 where {col} = 1-1h ")
tdSql.error(f" select * from tts1 where {col} < 1w-d ")
tdSql.error(f" select * from tts1 where {col} > 0/u ")
tdSql.error(f" select * from tts1 where {col} >= d/s ")
tdSql.error(f" select * from tts1 where {col} <= 1/a ")
tdSql.error(f" select * from tts1 where {col} <> d/1 ")
def run(self): def run(self):
tdSql.execute("drop database if exists dbms") tdSql.execute("drop database if exists dbms")
@ -148,15 +154,12 @@ class TDTestCase:
# create databases precision is us # create databases precision is us
tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ") tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ")
tdSql.execute("use dbus") tdSql.execute("use dbus")
tsp2 = -28800000 * 1000 tsp2 = tsp2 * 1000
tsp3 = -946800000000 * 1000 tsp3 = tsp3 * 1000
self.insertnow(tsp1,tsp2,tsp3) self.insertnow(tsp1,tsp2,tsp3)
self.querynow() self.querynow()
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success(f"{__file__} successfully executed") tdLog.success(f"{__file__} successfully executed")

View File

@ -0,0 +1,177 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import datetime
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), logSql)
def inertnow(self):
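        # create stbts/tts1 and insert six rows: three at now-relative timestamps (now+1d, now, now-1d)
        # and three at fixed epoch values at or before 1970 (0, -28800000, -946800000000)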
tsp1 = 0
tsp2 = -28800000
tsp3 = -946800000000
tdSql.execute(
"create table stbts (ts timestamp, ts1 timestamp, c1 int, ts2 timestamp) TAGS(t1 int)"
)
tdSql.execute("create table tts1 using stbts tags(1)")
tdSql.execute("insert into tts1 values (now+1d, now+1d, 6, now+1d)")
tdSql.execute("insert into tts1 values (now, now, 5, now)")
tdSql.execute("insert into tts1 values (now-1d, now-1d, 4, now-1d)")
tdSql.execute(f"insert into tts1 values ({tsp1}, {tsp1}, 3, {tsp1})")
tdSql.execute(f"insert into tts1 values ({tsp2}, {tsp2}, 2, {tsp2})")
tdSql.execute(f"insert into tts1 values ({tsp3}, {tsp3}, 1, {tsp3})")
def querynow(self):
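        # interval_day1/interval_day2: days from today back to 1970-01-01 and 1940-01-01; run the same
        # now-relative windows against ts and record the row counts, then repeat them on ts1 (day offsets)
        # and ts2 (equivalent minute offsets, 1d == 1440m) and require identical row counts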
interval_day1 = (datetime.date.today() - datetime.date(1970, 1, 1)).days
interval_day2 = (datetime.date.today() - datetime.date(1940, 1, 1)).days
tdLog.printNoPrefix("==========step query: execute query operation")
time.sleep(1)
tdSql.execute(" select * from tts1 where ts > now+1d ")
ts_len1 = len(tdSql.cursor.fetchall())
tdSql.execute(" select * from tts1 where ts < now+1d ")
ts_len2 = len(tdSql.cursor.fetchall())
tdSql.execute(" select * from tts1 where ts > now-1d ")
ts_len3 = len(tdSql.cursor.fetchall())
tdSql.execute(" select * from tts1 where ts < now-1d ")
ts_len4 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts > now-{interval_day1+1}d ")
ts_len5 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts < now-{interval_day1+1}d ")
ts_len6 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts > now-{interval_day1-1}d ")
ts_len7 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts < now-{interval_day1-1}d ")
ts_len8 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts > now-{interval_day2+1}d ")
ts_len9 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts < now-{interval_day2+1}d ")
ts_len10 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts > now-{interval_day2-1}d ")
ts_len11 = len(tdSql.cursor.fetchall())
tdSql.execute(f" select * from tts1 where ts < now-{interval_day2-1}d ")
ts_len12 = len(tdSql.cursor.fetchall())
tdSql.query(" select * from tts1 where ts1 > now+1d ")
tdSql.checkRows(ts_len1)
tdSql.query(" select * from tts1 where ts2 > now+1440m ")
tdSql.checkRows(ts_len1)
tdSql.query(" select * from tts1 where ts1 < now+1d ")
tdSql.checkRows(ts_len2)
tdSql.query(" select * from tts1 where ts2 < now+1440m ")
tdSql.checkRows(ts_len2)
tdSql.query(" select * from tts1 where ts1 > now-1d ")
tdSql.checkRows(ts_len3)
tdSql.query(" select * from tts1 where ts2 > now-1440m ")
tdSql.checkRows(ts_len3)
tdSql.query(" select * from tts1 where ts1 < now-1d ")
tdSql.checkRows(ts_len4)
tdSql.query(" select * from tts1 where ts2 < now-1440m ")
tdSql.checkRows(ts_len4)
tdSql.query(f" select * from tts1 where ts1 > now-{interval_day1+1}d ")
tdSql.checkRows(ts_len5)
tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1+1)*1440}m " )
tdSql.checkRows(ts_len5)
tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1+1}d ")
tdSql.checkRows(ts_len6)
tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1+1)*1440}m ")
tdSql.checkRows(ts_len6)
tdSql.query(f" select * from tts1 where ts1 > now-{interval_day1-1}d ")
tdSql.checkRows(ts_len7)
tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1-1)*1440}m ")
tdSql.checkRows(ts_len7)
tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1-1}d ")
tdSql.checkRows(ts_len8)
tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1-1)*1440}m ")
tdSql.checkRows(ts_len8)
tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 + 1}d ")
tdSql.checkRows(ts_len9)
tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 + 1)*1440}m ")
tdSql.checkRows(ts_len9)
tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 + 1}d ")
tdSql.checkRows(ts_len10)
tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 + 1)*1440}m ")
tdSql.checkRows(ts_len10)
tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 - 1}d ")
tdSql.checkRows(ts_len11)
tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 - 1)*1440}m ")
tdSql.checkRows(ts_len11)
tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 - 1}d ")
tdSql.checkRows(ts_len12)
tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 - 1)*1440}m ")
tdSql.checkRows(ts_len12)
def run(self):
tdSql.execute("drop database if exists dbms")
tdSql.execute("drop database if exists dbus")
# timestamp list:
# 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
# -631180800000 -> "1950-01-01 00:00:00"
tdLog.printNoPrefix("==========step1:create table precision ms && insert data && query")
# create databases precision is ms
tdSql.execute("create database if not exists dbms keep 36500")
tdSql.execute("use dbms")
self.inertnow()
self.querynow()
tdLog.printNoPrefix("==========step2:create table precision us && insert data && query")
# create databases precision is us
tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ")
tdSql.execute("use dbus")
self.inertnow()
self.querynow()
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
tdLog.printNoPrefix("==========step3:after wal, query table precision ms")
        tdSql.execute("use dbms")
self.querynow()
tdLog.printNoPrefix("==========step4: query table precision us")
tdSql.execute("use dbus")
self.querynow()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,93 @@
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import inspect
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), logSql)
def insertnow(self):
# timestamp list:
# 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
# -631180800000 -> "1950-01-01 00:00:00"
tsp1 = 0
tsp2 = -28800000
tsp3 = -946800000000
tsp4 = "1969-01-01 00:00:00.000"
tdSql.execute("insert into tcq1 values (now-11d, 5)")
tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")
def waitedQuery(self, sql, expectRows, timeout):
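        # poll sql once per second until at least expectRows rows come back or timeout seconds elapse;
        # returns (rows fetched, seconds waited)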
tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
try:
for i in range(timeout):
tdSql.cursor.execute(sql)
self.queryResult = tdSql.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(tdSql.cursor.description)
# tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= expectRows:
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
raise Exception(repr(e))
return (self.queryRows, timeout)
def cq(self):
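        # register continuous query cq1 over tcq1 (rows newer than -946800000000, 10d windows sliding
        # by 1d) and wait up to 120 seconds for its first output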
tdSql.execute(
"create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)"
)
self.waitedQuery("select * from cq1", 1, 120)
def querycq(self):
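        # spot-check two rows of the continuous-query output produced from the fixed inserts above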
tdSql.query("select * from cq1")
tdSql.checkData(0, 1, 1.0)
tdSql.checkData(10, 1, 2.0)
def run(self):
tdSql.execute("drop database if exists dbcq")
tdSql.execute("create database if not exists dbcq keep 36500")
tdSql.execute("use dbcq")
tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
tdSql.execute("create table tcq1 using stbcq tags(1)")
self.insertnow()
self.cq()
self.querycq()
# after wal and sync, check again
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
self.querycq()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,97 @@
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import inspect
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), logSql)
def insertnow(self):
# timestamp list:
# 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
# -631180800000 -> "1950-01-01 00:00:00"
tsp1 = 0
tsp2 = -28800000
tsp3 = -946800000000
tsp4 = "1969-01-01 00:00:00.000"
tdSql.execute("insert into tcq1 values (now-11d, 5)")
tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")
def waitedQuery(self, sql, expectRows, timeout):
tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
try:
for i in range(timeout):
tdSql.cursor.execute(sql)
self.queryResult = tdSql.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(tdSql.cursor.description)
# tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= expectRows:
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
raise Exception(repr(e))
return (self.queryRows, timeout)
def showstream(self):
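        # with pre-1970 rows feeding the continuous query, fetching "show streams" is expected to raise
        # the ValueError noted below; if the loop finishes without an exception the case fails via tdLog.exit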
tdSql.execute(
"create table cq1 as select avg(c1) from tcq1 interval(10d) sliding(1d)"
)
sql = "show streams"
timeout = 30
exception = "ValueError('year -292275055 is out of range')"
try:
for i in range(timeout):
tdSql.cursor.execute(sql)
self.queryResult = tdSql.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(tdSql.cursor.description)
# tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= timeout:
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
tdLog.info(f"sql: {sql} except raise {exception}, actually raise {repr(e)} ")
else:
tdLog.exit(f"sql: {sql} except raise {exception}, actually not")
def run(self):
tdSql.execute("drop database if exists dbcq")
tdSql.execute("create database if not exists dbcq keep 36500")
tdSql.execute("use dbcq")
tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
tdSql.execute("create table tcq1 using stbcq tags(1)")
self.insertnow()
self.showstream()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 60,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,89 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
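        # walk the project tree (community or standalone layout) to find the directory holding the
        # taosd binary and return the build root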
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
        # insert 6,000,000 rows into stb0 (60 child tables x 100,000 rows each)
os.system("%staosdemo -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0,60)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 6000000)
os.system('%staosdemo -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath)
time.sleep(2)
query_pid = int(subprocess.getstatusoutput('ps aux|grep "TD-3453/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1])
taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1])
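        # if taosd is spending >10% CPU on the background full-table scan, kill the query, wait, and
        # re-measure; suc_kill records the branch taken and is compared with the stb0 child-table count
        # (60) below, so the case passes only when the kill path brings the load back down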
if taosd_cpu_load_1 > 10.0 :
os.system("kill -9 %d" % query_pid)
time.sleep(5)
taosd_cpu_load_2 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1])
if taosd_cpu_load_2 < 10.0 :
suc_kill = 60
else:
suc_kill = 10
print("taosd_cpu_load is higher than 10%")
else:
suc_kill = 20
print("taosd_cpu_load is still less than 10%")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, "%d" % suc_kill)
os.system("rm -rf querySystemInfo*")
os.system("rm -rf insert_res.txt")
os.system("rm -rf insert_res.txt")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,20 @@
{
"filetype":"query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"specified_table_query":{
"query_interval":1,
"concurrent":1,
"sqls":[
{
"sql": "select * from stb0",
"result": ""
}
]
}
}

View File

@ -0,0 +1,35 @@
from datetime import datetime
import time
import os
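# Reformat all_query_res0.txt into comma-separated fields, convert the leading epoch-millisecond
# column into a quoted 'YYYY-MM-DD HH:MM:SS.mmm' timestamp, quote the next two fields, and append
# the rebuilt rows to test_query_res0.txt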
os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt")
with open('./new_query_res0.txt','r+') as f0:
contents = f0.readlines()
if os.path.exists('./test_query_res0.txt'):
os.system("rm -rf ./test_query_res0.txt")
for i in range(len(contents)):
content = contents[i].rstrip('\n')
stimestamp = content.split(',')[0]
timestamp = int(stimestamp)
d = datetime.fromtimestamp(timestamp/1000)
str0 = d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
ts = "'"+str0+"'"
str1 = "'"+content.split(',')[1]+"'"
str2 = "'"+content.split(',')[2]+"'"
content = ts + "," + str1 + "," + str2 + "," + content.split(',',3)[3]
contents[i] = content + "\n"
with open('./test_query_res0.txt','a') as fi:
fi.write(contents[i])
os.system("rm -rf ./new_query_res0.txt")
# timestamp = 1604160000099
# d = datetime.fromtimestamp(timestamp/1000)
# str1 = d.strftime("%Y-%m-%d %H:%M:%S.%f")
# print(str1[:-3])

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1000,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 1,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 1000,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 200,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":4}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 10,
"disorder_range": 100,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count":1,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 1,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 100,
"disorder_range": 1,
"timestamp_step": 1000,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1024}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 100,
"max_sql_len": 10240000000,
"databases": [{
"dbinfo": {
"name": "db1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 1000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 12,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 2000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 100,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 150,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 151,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 100,
"interlace_rows": 0,
"num_of_records_per_req": 2000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20000,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 1000,
"insert_interval": 2000,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":9}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,166 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 1
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 5,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 6,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 7,
"childtable_prefix": "stb02_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 4,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb3",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb03_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 2,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb4",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb04_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,166 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 1
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 5,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-12-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 6,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-12-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 7,
"childtable_prefix": "stb02_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 4,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-12-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb3",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb03_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 2,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-12-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb4",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb04_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 30,
"childtable_limit": 0,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-12-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "dbno",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 1
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 5,
"childtable_prefix": "stb00_",
"auto_create_table": "yes",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,166 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 5,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"yes",
"childtable_count": 6,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb2",
"child_table_exists":"yes",
"childtable_count": 7,
"childtable_prefix": "stb02_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 4,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb3",
"child_table_exists":"yes",
"childtable_count": 8,
"childtable_prefix": "stb03_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 2,
"childtable_offset":7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb4",
"child_table_exists":"yes",
"childtable_count": 8,
"childtable_prefix": "stb04_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,166 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 1
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 5,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 6,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb2",
"child_table_exists":"no",
"childtable_count": 7,
"childtable_prefix": "stb02_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 4,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb3",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb03_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 2,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb4",
"child_table_exists":"no",
"childtable_count": 8,
"childtable_prefix": "stb04_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset": 7,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "dbtest123",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "sample",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset": 0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./tools/taosdemoAllTest/sample.csv",
"tags_file": "",
"columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count":2,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10,
"childtable_limit": -1,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "./tools/taosdemoAllTest/tags.csv",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}]
}]
}]
}

View File

@ -0,0 +1,88 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file":"./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 10,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count":20,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 20,
"childtable_limit": 0,
"childtable_offset":0,
"multi_thread_write_one_tbl": "no",
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,72 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import time
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
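# walk the project tree to locate the taosd binary and derive the build directory from its path (skipping packaging artifacts)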
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: drop and child_table_exists combination test
# insert: use the "childtable_offset" and "childtable_limit" parameters to control the child-table offset and limit
os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath)
time.sleep(15)
tdSql.execute("use db")
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 1000000)
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 1,
"childtable_offset": 99,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 5,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"yes",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"childtable_limit": 94,
"childtable_offset": 5,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,61 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 0,
"childtable_limit": 0,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,3 @@
1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false'

View File

@ -0,0 +1,36 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times": 2,
"specified_table_query": {
"query_interval": 1,
"concurrent": 3,
"sqls": [
{
"sql": "select last_row(*) from stb0 ",
"result": "./query_res0.txt"
},
{
"sql": "select count(*) from stb00_1",
"result": "./query_res1.txt"
}
]
},
"super_table_query": {
"stblname": "stb1",
"query_interval": 1,
"threads": 3,
"sqls": [
{
"sql": "select last_row(ts) from xxxx",
"result": "./query_res2.txt"
}
]
}
}

View File

@ -0,0 +1,86 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
"num_of_records_per_req": 3000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 0,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
},
{
"name": "stb1",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"batch_create_tbl_num": 10,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 200,
"childtable_limit": 0,
"childtable_offset": 0,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-11-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,2 @@
1,-127,127,'23ac,;\[uer]3','true'
1,-127,126,'23ac,;\[uer]3','true'

View File

@ -0,0 +1,229 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
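# walk the project tree to locate the taosd binary and derive the build directory from its path (skipping packaging artifacts)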
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 1000)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 100000)
tdSql.query("select count(*) from stb01_1")
tdSql.checkData(0, 0, 200)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 200000)
# insert: create multiple tables per sql and insert one row per sql.
os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 10)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 20)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 10000)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 100000)
tdSql.query("select count(*) from stb01_0")
tdSql.checkData(0, 0, 20000)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 400000)
# insert: using parament "insert_interval to controls spped of insert.
# but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。
os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath)
tdSql.execute("use db")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
tdSql.query("select count(*) from stb00_0")
tdSql.checkData(0, 0, 20000)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 2000000)
tdSql.query("show stables")
tdSql.checkData(1, 4, 100)
tdSql.query("select count(*) from stb01_0")
tdSql.checkData(0, 0, 20000)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 2000000)
# the 3 test cases above take about 2min30s in total.
# insert: drop and child_table_exists combination test
# insert: use the "childtable_offset" and "childtable_limit" parameters to control the child-table offset and limit
os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath)
tdSql.error("show dbno.stables")
os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 5)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 6)
tdSql.query("select count (tbname) from stb2")
tdSql.checkData(0, 0, 7)
tdSql.query("select count (tbname) from stb3")
tdSql.checkData(0, 0, 8)
tdSql.query("select count (tbname) from stb4")
tdSql.checkData(0, 0, 8)
os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath)
tdSql.execute("use db")
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 50)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 240)
tdSql.query("select count(*) from stb2")
tdSql.checkData(0, 0, 220)
tdSql.query("select count(*) from stb3")
tdSql.checkData(0, 0, 180)
tdSql.query("select count(*) from stb4")
tdSql.checkData(0, 0, 160)
os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath)
tdSql.execute("use db")
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 150)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 360)
tdSql.query("select count(*) from stb2")
tdSql.checkData(0, 0, 360)
tdSql.query("select count(*) from stb3")
tdSql.checkData(0, 0, 340)
tdSql.query("select count(*) from stb4")
tdSql.checkData(0, 0, 400)
os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath)
tdSql.execute("use db")
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 50)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 120)
tdSql.query("select count(*) from stb2")
tdSql.checkData(0, 0, 140)
tdSql.query("select count(*) from stb3")
tdSql.checkData(0, 0, 160)
tdSql.query("select count(*) from stb4")
tdSql.checkData(0, 0, 160)
# insert: illegal parameters in the json file; still need a proper way to assert the expected exception.
tdSql.execute("drop database if exists db")
os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns.json -y " % binPath)
tdSql.error("use db")
os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-lmax.json -y " % binPath)
tdSql.error("select * from db.stb0")
os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-count-0.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count(*) from db.stb0")
tdSql.checkData(0, 0, 10000)
tdSql.execute("drop database if exists db")
os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-tags-count129.json -y " % binPath)
tdSql.error("use db1")
# insert: timestamp and step
os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("show stables")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 10)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 20)
tdSql.query("select last(ts) from db.stb00_0")
tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000")
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 200)
tdSql.query("select last(ts) from db.stb01_0")
tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000")
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 400)
# insert: disorder_ratio
os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 1)
tdSql.query("select count (tbname) from stb1")
tdSql.checkData(0, 0, 1)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10)
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10)
# insert: sample json
os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath)
tdSql.execute("use dbtest123")
tdSql.query("select col2 from stb0")
tdSql.checkData(0, 0, 2147483647)
tdSql.query("select t1 from stb1")
tdSql.checkData(0, 0, -127)
tdSql.query("select t2 from stb1")
tdSql.checkData(1, 0, 126)
# insert: test the interlace_rows parameter
os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
tdSql.execute("use db")
tdSql.query("select count (tbname) from stb0")
tdSql.checkData(0, 0, 100)
tdSql.query("select count (*) from stb0")
tdSql.checkData(0, 0, 15000)
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,91 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import time
from datetime import datetime
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
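# walk the project tree to locate the taosd binary and derive the build directory from its path (skipping packaging artifacts)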
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
buildPath = ""
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
# query: insert data with taosdemo, run the specified and super-table queries from the json config,
# then compare the saved query result files against direct SQL queries
os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
os.system("%staosdemo -f tools/taosdemoAllTest/speciQuery.json" % binPath)
os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
tdSql.execute("use db")
tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
os.system("python3 tools/taosdemoAllTest/convertResFile.py")
tdSql.execute("insert into result0 file './test_query_res0.txt'")
tdSql.query("select ts from result0")
tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
tdSql.query("select count(*) from result0")
tdSql.checkData(0, 0, 1)
with open('./all_query_res1.txt','r+') as f1:
result1 = int(f1.readline())
tdSql.query("select count(*) from stb00_1")
tdSql.checkData(0, 0, "%d" % result1)
with open('./all_query_res2.txt','r+') as f2:
result2 = int(f2.readline())
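# the saved last_row(ts) value is an epoch timestamp in milliseconds; convert it back to a string for comparison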
d2 = datetime.fromtimestamp(result2/1000)
timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.query("select last_row(ts) from stb1")
tdSql.checkData(0, 0, "%s" % timest)
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/taosdemoTestQuerytWithJson.py.sql")
os.system("rm -rf ./querySystemInfo*")
os.system("rm -rf ./query_res*")
os.system("rm -rf ./all_query*")
os.system("rm -rf ./test_query_res0.txt")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -338,10 +338,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
print stop dnode1 and sleep 3000 print stop dnode1 and sleep 3000
sleep 3000 sleep 3000
sql drop dnode $hostname1
print drop dnode1 and sleep 9000
sleep 9000
sql show mnodes sql show mnodes
$dnode1Role = $data2_1 $dnode1Role = $data2_1
$dnode4Role = $data2_4 $dnode4Role = $data2_4
@ -357,6 +353,25 @@ endi
print ============================== step6.1 print ============================== step6.1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
$x = 0
step6.1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
if $data4_1 != ready then
goto step6.1
endi
sql drop dnode $hostname1
print drop dnode1 and sleep 9000
sleep 9000
$x = 0 $x = 0
show6: show6:
$x = $x + 1 $x = $x + 1

View File

@ -97,7 +97,6 @@ if $data2_2 != 3 then
endi endi
print ========== step3 print ========== step3
sql drop dnode $hostname2
$x = 0 $x = 0
show3: show3:
@ -114,6 +113,7 @@ print dnode2 openVnodes $data2_2
print ========== step4 print ========== step4
sql create dnode $hostname3 sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start system sh/exec.sh -n dnode3 -s start
sql drop dnode $hostname2
$x = 0 $x = 0
show4: show4:

View File

@ -98,7 +98,6 @@ endi
print ========== step3 print ========== step3
system sh/exec.sh -n dnode2 -s stop -x SIGINT system sh/exec.sh -n dnode2 -s stop -x SIGINT
sql drop dnode $hostname2
sql show dnodes sql show dnodes
print dnode1 openVnodes $data2_1 print dnode1 openVnodes $data2_1
@ -128,6 +127,26 @@ endi
print ============ step 4.1 print ============ step 4.1
system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode2 -s start
$x = 0
step4.1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
print dnode4 $data4_4
if $data4_2 != ready then
goto step4.1
endi
sql drop dnode $hostname2
$x = 0 $x = 0
show4: show4:
$x = $x + 1 $x = $x + 1