Merge branch 'develop' into feature/2.0tsdb

This commit is contained in:
Hongze Cheng 2020-05-19 01:51:04 +00:00
commit 4d1f834904
33 changed files with 1160 additions and 517 deletions

View File

@@ -1016,7 +1016,9 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
     pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
   }
-  if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
+  // TODO: 2048 is added because TSDB_MAX_TAGS_LEN now is 65536
+  // but TSDB_PAYLOAD_SIZE is 65380
+  if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE + 2048)) != TSDB_CODE_SUCCESS) {
     return code;
   }

View File

@@ -1467,15 +1467,16 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   char * pMsg;
   int msgLen = 0;
-  char *tmpData = 0;
-  if (pSql->cmd.allocSize > 0) {
-    tmpData = calloc(1, pSql->cmd.allocSize);
+  char *tmpData = NULL;
+  uint32_t len = pSql->cmd.payloadLen;
+  if (len > 0) {
+    tmpData = calloc(1, len);
     if (NULL == tmpData) {
       return TSDB_CODE_CLI_OUT_OF_MEMORY;
     }
     // STagData is in binary format, strncpy is not available
-    memcpy(tmpData, pSql->cmd.payload, pSql->cmd.allocSize);
+    memcpy(tmpData, pSql->cmd.payload, len);
   }
   SSqlCmd * pCmd = &pSql->cmd;
@@ -1489,9 +1490,9 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg);
-  if (pSql->cmd.autoCreated) {
-    memcpy(pInfoMsg->tags, tmpData, sizeof(STagData));
-    pMsg += sizeof(STagData);
+  if (pSql->cmd.autoCreated && len > 0) {
+    memcpy(pInfoMsg->tags, tmpData, len);
+    pMsg += len;
   }
   pCmd->payloadLen = pMsg - (char*)pInfoMsg;;
@@ -2375,7 +2376,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
   tscGetQueryInfoDetailSafely(&pNew->cmd, 0, &pNewQueryInfo);
   pNew->cmd.autoCreated = pSql->cmd.autoCreated; // create table if not exists
-  if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
+  if (TSDB_CODE_SUCCESS != tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE + pSql->cmd.payloadLen)) {
     tscError("%p malloc failed for payload to get table meta", pSql);
     free(pNew);
@@ -2386,7 +2387,8 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
   assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
   strncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, tListLen(pNewMeterMetaInfo->name));
-  memcpy(pNew->cmd.payload, pSql->cmd.payload, TSDB_DEFAULT_PAYLOAD_SIZE); // tag information if table does not exists.
+  memcpy(pNew->cmd.payload, pSql->cmd.payload, pSql->cmd.payloadLen); // tag information if table does not exists.
+  pNew->cmd.payloadLen = pSql->cmd.payloadLen;
   tscTrace("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated);
   pNew->fp = tscTableMetaCallBack;

View File

@@ -2169,7 +2169,7 @@ int tscSetMgmtIpListFromCfg(const char *first, const char *second) {
   tscMgmtIpSet.inUse = 0;
   if (first && first[0] != 0) {
-    if (strlen(first) >= TSDB_FQDN_LEN) {
+    if (strlen(first) >= TSDB_EP_LEN) {
       terrno = TSDB_CODE_INVALID_FQDN;
       return -1;
     }
@@ -2178,7 +2178,7 @@ int tscSetMgmtIpListFromCfg(const char *first, const char *second) {
   }
   if (second && second[0] != 0) {
-    if (strlen(second) >= TSDB_FQDN_LEN) {
+    if (strlen(second) >= TSDB_EP_LEN) {
       terrno = TSDB_CODE_INVALID_FQDN;
       return -1;
     }

View File

@@ -61,10 +61,10 @@ int32_t tscEmbedded = 0;
  */
 int64_t tsMsPerDay[] = {86400000L, 86400000000L};
-char tsFirst[TSDB_FQDN_LEN] = {0};
-char tsSecond[TSDB_FQDN_LEN] = {0};
-char tsArbitrator[TSDB_FQDN_LEN] = {0};
-char tsLocalEp[TSDB_FQDN_LEN] = {0};  // Local End Point, hostname:port
+char tsFirst[TSDB_EP_LEN] = {0};
+char tsSecond[TSDB_EP_LEN] = {0};
+char tsArbitrator[TSDB_EP_LEN] = {0};
+char tsLocalEp[TSDB_EP_LEN] = {0};  // Local End Point, hostname:port
 uint16_t tsServerPort = 6030;
 uint16_t tsDnodeShellPort = 6030;  // udp[6035-6039] tcp[6035]
 uint16_t tsDnodeDnodePort = 6035;  // udp/tcp
@@ -284,7 +284,7 @@ static void doInitGlobalConfig() {
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
   cfg.minValue = 0;
   cfg.maxValue = 0;
-  cfg.ptrLength = TSDB_FQDN_LEN;
+  cfg.ptrLength = TSDB_EP_LEN;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);
@@ -294,7 +294,7 @@ static void doInitGlobalConfig() {
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
   cfg.minValue = 0;
   cfg.maxValue = 0;
-  cfg.ptrLength = TSDB_FQDN_LEN;
+  cfg.ptrLength = TSDB_EP_LEN;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);
@@ -356,7 +356,7 @@ static void doInitGlobalConfig() {
   cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
   cfg.minValue = 0;
   cfg.maxValue = 0;
-  cfg.ptrLength = TSDB_FQDN_LEN;
+  cfg.ptrLength = TSDB_EP_LEN;
   cfg.unitType = TAOS_CFG_UTYPE_NONE;
   taosInitConfigOption(cfg);

View File

@@ -411,7 +411,7 @@ static bool dnodeReadMnodeInfos() {
       dError("failed to read mnode mgmtIpList.json, nodeName not found");
       goto PARSE_OVER;
     }
-    strncpy(tsMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_FQDN_LEN);
+    strncpy(tsMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
   }
   ret = true;

View File

@@ -218,7 +218,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
 #define TSDB_LOCALE_LEN 64
 #define TSDB_TIMEZONE_LEN 64
-#define TSDB_FQDN_LEN 256
+#define TSDB_FQDN_LEN 128
+#define TSDB_EP_LEN (TSDB_FQDN_LEN+6)
 #define TSDB_IPv4ADDR_LEN 16
 #define TSDB_FILENAME_LEN 128
 #define TSDB_METER_VNODE_BITS 20

View File

@@ -530,7 +530,7 @@ typedef struct {
 typedef struct {
   int32_t nodeId;
-  char nodeEp[TSDB_FQDN_LEN];
+  char nodeEp[TSDB_EP_LEN];
 } SDMMnodeInfo;
 typedef struct {
@@ -542,7 +542,7 @@ typedef struct {
 typedef struct {
   uint32_t version;
   int32_t dnodeId;
-  char dnodeEp[TSDB_FQDN_LEN];
+  char dnodeEp[TSDB_EP_LEN];
   uint32_t moduleStatus;
   uint32_t lastReboot; // time stamp for last reboot
   uint16_t numOfTotalVnodes; // from config file
@@ -584,7 +584,7 @@ typedef struct {
 typedef struct {
   int32_t nodeId;
-  char nodeEp[TSDB_FQDN_LEN];
+  char nodeEp[TSDB_EP_LEN];
 } SMDVnodeDesc;
 typedef struct {
@@ -669,7 +669,7 @@ typedef struct SCMShowRsp {
 } SCMShowRsp;
 typedef struct {
-  char ep[TSDB_FQDN_LEN]; // end point, hostname:port
+  char ep[TSDB_EP_LEN]; // end point, hostname:port
 } SCMCreateDnodeMsg, SCMDropDnodeMsg;
 typedef struct {
@@ -684,7 +684,7 @@ typedef struct {
 } SDMConfigVnodeMsg;
 typedef struct {
-  char ep[TSDB_FQDN_LEN]; // end point, hostname:port
+  char ep[TSDB_EP_LEN]; // end point, hostname:port
   char config[64];
 } SMDCfgDnodeMsg, SCMCfgDnodeMsg;

View File

@@ -33,7 +33,7 @@ typedef struct SDnodeObj {
   int32_t dnodeId;
   uint16_t dnodePort;
   char dnodeFqdn[TSDB_FQDN_LEN + 1];
-  char dnodeEp[TSDB_FQDN_LEN + 1];
+  char dnodeEp[TSDB_EP_LEN + 1];
   int64_t createdTime;
   uint32_t lastAccess;
   int32_t openVnodes;
@@ -123,7 +123,6 @@ typedef struct SVgObj {
   int32_t numOfVnodes;
   int32_t lbDnodeId;
   int32_t lbTime;
-  int8_t status;
   int8_t inUse;
   int8_t reserved[13];
   int8_t updateEnd[1];

View File

@@ -22,11 +22,6 @@ extern "C" {
 #include "mgmtDef.h"
-enum _TSDB_VG_STATUS {
-  TSDB_VG_STATUS_READY,
-  TSDB_VG_STATUS_UPDATE
-};
 int32_t mgmtInitVgroups();
 void mgmtCleanUpVgroups();
 SVgObj *mgmtGetVgroup(int32_t vgId);

View File

@@ -1415,7 +1415,7 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj
     char *pTagData = (char *) pCreate->schema; // it is a tag key
     SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData);
     if (pSuperTable == NULL) {
-      mError("table:%s, corresponding super table does not exist", pCreate->tableId);
+      mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData);
       free(pTable);
       terrno = TSDB_CODE_INVALID_TABLE;
       return NULL;
@@ -1505,6 +1505,11 @@ static void mgmtProcessCreateChildTableMsg(SQueuedMsg *pMsg) {
     }
     pMsg->pTable = (STableObj *)mgmtDoCreateChildTable(pCreate, pVgroup, sid);
+    if (pMsg->pTable == NULL) {
+      mgmtSendSimpleResp(pMsg->thandle, terrno);
+      return;
+    }
     mgmtIncTableRef(pMsg->pTable);
   }
 } else {
@@ -1742,7 +1747,12 @@ static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) {
   pCreateMsg->igExists = 1;
   pCreateMsg->getMeta = 1;
   pCreateMsg->contLen = htonl(contLen);
-  memcpy(pCreateMsg->schema, pInfo->tags, sizeof(STagData));
+  contLen = sizeof(STagData);
+  if (contLen > pMsg->contLen - sizeof(SCMTableInfoMsg)) {
+    contLen = pMsg->contLen - sizeof(SCMTableInfoMsg);
+  }
+  memcpy(pCreateMsg->schema, pInfo->tags, contLen);
   SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
   pMsg->pCont = newMsg->pCont;

View File

@@ -371,12 +371,6 @@ int32_t mgmtGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
   pSchema[cols].bytes = htons(pShow->bytes[cols]);
   cols++;
-  pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE;
-  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
-  strcpy(pSchema[cols].name, "vgroup_status");
-  pSchema[cols].bytes = htons(pShow->bytes[cols]);
-  cols++;
   int32_t maxReplica = 0;
   SVgObj *pVgroup = NULL;
   STableObj *pTable = NULL;
@@ -471,11 +465,6 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
     *(int32_t *) pWrite = pVgroup->numOfTables;
     cols++;
-    pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-    char* status = pVgroup->status? "updating" : "ready";
-    STR_TO_VARSTR(pWrite, status);
-    cols++;
     for (int32_t i = 0; i < maxReplica; ++i) {
       pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
       *(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
@@ -489,8 +478,8 @@ int32_t mgmtRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pCo
         cols++;
         pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
-        status = mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
-        STR_TO_VARSTR(pWrite, status);
+        char *role = mgmtGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
+        STR_TO_VARSTR(pWrite, role);
         cols++;
       } else {
         pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;

View File

@@ -68,7 +68,7 @@ typedef enum {
 typedef struct {
   void * conn;
   void * timer;
-  char ep[TSDB_FQDN_LEN];
+  char ep[TSDB_EP_LEN];
   int8_t cmdIndex;
   int8_t state;
   char sql[SQL_LENGTH];

View File

@@ -1,33 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _rpc_hash_ip_header_
#define _rpc_hash_ip_header_
#ifdef __cplusplus
extern "C" {
#endif
void *rpcOpenIpHash(int maxSessions);
void rpcCloseIpHash(void *handle);
void *rpcAddIpHash(void *handle, void *pData, uint32_t ip, uint16_t port);
void rpcDeleteIpHash(void *handle, uint32_t ip, uint16_t port);
void *rpcGetIpHash(void *handle, uint32_t ip, uint16_t port);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,167 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "tmempool.h"
#include "rpcLog.h"
typedef struct SIpHash {
uint32_t ip;
uint16_t port;
int hash;
struct SIpHash *prev;
struct SIpHash *next;
void *data;
} SIpHash;
typedef struct {
SIpHash **ipHashList;
mpool_h ipHashMemPool;
int maxSessions;
} SHashObj;
int rpcHashIp(void *handle, uint32_t ip, uint16_t port) {
SHashObj *pObj = (SHashObj *)handle;
int hash = 0;
hash = (int)(ip >> 16);
hash += (unsigned short)(ip & 0xFFFF);
hash += port;
hash = hash % pObj->maxSessions;
return hash;
}
void *rpcAddIpHash(void *handle, void *data, uint32_t ip, uint16_t port) {
int hash;
SIpHash *pNode;
SHashObj *pObj;
pObj = (SHashObj *)handle;
if (pObj == NULL || pObj->maxSessions == 0) return NULL;
hash = rpcHashIp(pObj, ip, port);
pNode = (SIpHash *)taosMemPoolMalloc(pObj->ipHashMemPool);
pNode->ip = ip;
pNode->port = port;
pNode->data = data;
pNode->prev = 0;
pNode->next = pObj->ipHashList[hash];
pNode->hash = hash;
if (pObj->ipHashList[hash] != 0) (pObj->ipHashList[hash])->prev = pNode;
pObj->ipHashList[hash] = pNode;
return pObj;
}
void rpcDeleteIpHash(void *handle, uint32_t ip, uint16_t port) {
int hash;
SIpHash *pNode;
SHashObj *pObj;
pObj = (SHashObj *)handle;
if (pObj == NULL || pObj->maxSessions == 0) return;
hash = rpcHashIp(pObj, ip, port);
pNode = pObj->ipHashList[hash];
while (pNode) {
if (pNode->ip == ip && pNode->port == port) break;
pNode = pNode->next;
}
if (pNode) {
if (pNode->prev) {
pNode->prev->next = pNode->next;
} else {
pObj->ipHashList[hash] = pNode->next;
}
if (pNode->next) {
pNode->next->prev = pNode->prev;
}
taosMemPoolFree(pObj->ipHashMemPool, (char *)pNode);
}
}
void *rpcGetIpHash(void *handle, uint32_t ip, uint16_t port) {
int hash;
SIpHash *pNode;
SHashObj *pObj;
pObj = (SHashObj *)handle;
if (pObj == NULL || pObj->maxSessions == 0) return NULL;
hash = rpcHashIp(pObj, ip, port);
pNode = pObj->ipHashList[hash];
while (pNode) {
if (pNode->ip == ip && pNode->port == port) {
break;
}
pNode = pNode->next;
}
if (pNode) {
return pNode->data;
}
return NULL;
}
void *rpcOpenIpHash(int maxSessions) {
SIpHash **ipHashList;
mpool_h ipHashMemPool;
SHashObj *pObj;
ipHashMemPool = taosMemPoolInit(maxSessions, sizeof(SIpHash));
if (ipHashMemPool == 0) return NULL;
ipHashList = calloc(sizeof(SIpHash *), (size_t)maxSessions);
if (ipHashList == 0) {
taosMemPoolCleanUp(ipHashMemPool);
return NULL;
}
pObj = malloc(sizeof(SHashObj));
if (pObj == NULL) {
taosMemPoolCleanUp(ipHashMemPool);
free(ipHashList);
return NULL;
}
pObj->maxSessions = maxSessions;
pObj->ipHashMemPool = ipHashMemPool;
pObj->ipHashList = ipHashList;
return pObj;
}
void rpcCloseIpHash(void *handle) {
SHashObj *pObj;
pObj = (SHashObj *)handle;
if (pObj == NULL || pObj->maxSessions == 0) return;
if (pObj->ipHashMemPool) taosMemPoolCleanUp(pObj->ipHashMemPool);
if (pObj->ipHashList) free(pObj->ipHashList);
memset(pObj, 0, sizeof(SHashObj));
free(pObj);
}

View File

@@ -19,7 +19,6 @@
 #include "ttimer.h"
 #include "tutil.h"
 #include "rpcLog.h"
-#include "rpcHaship.h"
 #include "rpcUdp.h"
 #include "rpcHead.h"
@@ -28,8 +27,6 @@
 #define RPC_UDP_BUF_TIME 5 // mseconds
 #define RPC_MAX_UDP_SIZE 65480
-int tsUdpDelay = 0;
 typedef struct {
   void *signature;
   int index;
@@ -38,8 +35,6 @@ typedef struct {
   uint16_t localPort; // local port
   char label[12]; // copy from udpConnSet;
   pthread_t thread;
-  pthread_mutex_t mutex;
-  void *tmrCtrl; // copy from UdpConnSet;
   void *hash;
   void *shandle; // handle passed by upper layer during server initialization
   void *pSet;
@@ -55,26 +50,11 @@ typedef struct {
   void *shandle; // handle passed by upper layer during server initialization
   int threads;
   char label[12];
-  void *tmrCtrl;
   void *(*fp)(SRecvInfo *pPacket);
   SUdpConn udpConn[];
 } SUdpConnSet;
-typedef struct {
-  void *signature;
-  uint32_t ip; // dest IP
-  uint16_t port; // dest Port
-  SUdpConn *pConn;
-  struct sockaddr_in destAdd;
-  void *msgHdr;
-  int totalLen;
-  void *timer;
-  int emptyNum;
-} SUdpBuf;
 static void *taosRecvUdpData(void *param);
-static SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, uint16_t port);
-static void taosProcessUdpBufTimer(void *param, void *tmrId);
 void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads, void *fp, void *shandle) {
   SUdpConn *pConn;
@@ -94,16 +74,6 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
   pSet->fp = fp;
   strcpy(pSet->label, label);
-  if ( tsUdpDelay ) {
-    char udplabel[12];
-    sprintf(udplabel, "%s.b", label);
-    pSet->tmrCtrl = taosTmrInit(RPC_MAX_UDP_CONNS * threads, 5, 5000, udplabel);
-    if (pSet->tmrCtrl == NULL) {
-      tError("%s failed to initialize tmrCtrl");
-      taosCleanUpUdpConnection(pSet);
-      return NULL;
-    }
-  }
   uint16_t ownPort;
   for (int i = 0; i < threads; ++i) {
     pConn = pSet->udpConn + i;
@@ -135,11 +105,6 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
     pConn->index = i;
     pConn->pSet = pSet;
     pConn->signature = pConn;
-    if (tsUdpDelay) {
-      pConn->hash = rpcOpenIpHash(RPC_MAX_UDP_CONNS);
-      pthread_mutex_init(&pConn->mutex, NULL);
-      pConn->tmrCtrl = pSet->tmrCtrl;
-    }
     pthread_attr_t thAttr;
     pthread_attr_init(&thAttr);
@@ -173,10 +138,6 @@ void taosCleanUpUdpConnection(void *handle) {
     free(pConn->buffer);
     pthread_cancel(pConn->thread);
     taosCloseSocket(pConn->fd);
-    if (pConn->hash) {
-      rpcCloseIpHash(pConn->hash);
-      pthread_mutex_destroy(&pConn->mutex);
-    }
   }
   for (int i = 0; i < pSet->threads; ++i) {
@@ -185,7 +146,6 @@ void taosCleanUpUdpConnection(void *handle) {
     tTrace("chandle:%p is closed", pConn);
   }
-  taosTmrCleanUp(pSet->tmrCtrl);
   tfree(pSet);
 }
@@ -205,64 +165,42 @@ void *taosOpenUdpConnection(void *shandle, void *thandle, uint32_t ip, uint16_t
 static void *taosRecvUdpData(void *param) {
   SUdpConn *pConn = param;
   struct sockaddr_in sourceAdd;
-  int dataLen;
+  ssize_t dataLen;
   unsigned int addLen;
   uint16_t port;
-  int minSize = sizeof(SRpcHead);
   SRecvInfo recvInfo;
   memset(&sourceAdd, 0, sizeof(sourceAdd));
   addLen = sizeof(sourceAdd);
   tTrace("%s UDP thread is created, index:%d", pConn->label, pConn->index);
+  char *msg = pConn->buffer;
   while (1) {
     dataLen = recvfrom(pConn->fd, pConn->buffer, RPC_MAX_UDP_SIZE, 0, (struct sockaddr *)&sourceAdd, &addLen);
     port = ntohs(sourceAdd.sin_port);
-    //tTrace("%s msg is recv from 0x%x:%hu len:%d", pConn->label, sourceAdd.sin_addr.s_addr, port, dataLen);
     if (dataLen < sizeof(SRpcHead)) {
       tError("%s recvfrom failed, reason:%s\n", pConn->label, strerror(errno));
       continue;
     }
-    int processedLen = 0, leftLen = 0;
-    int msgLen = 0;
-    int count = 0;
-    char *msg = pConn->buffer;
-    while (processedLen < dataLen) {
-      leftLen = dataLen - processedLen;
-      SRpcHead *pHead = (SRpcHead *)msg;
-      msgLen = htonl((uint32_t)pHead->msgLen);
-      if (leftLen < minSize || msgLen > leftLen || msgLen < minSize) {
-        tError("%s msg is messed up, dataLen:%d processedLen:%d count:%d msgLen:%d", pConn->label, dataLen,
-               processedLen, count, msgLen);
-        break;
-      }
-      char *tmsg = malloc((size_t)msgLen + tsRpcOverhead);
-      if (NULL == tmsg) {
-        tError("%s failed to allocate memory, size:%d", pConn->label, msgLen);
-        break;
-      }
-      tmsg += tsRpcOverhead;  // overhead for SRpcReqContext
-      memcpy(tmsg, msg, (size_t)msgLen);
-      recvInfo.msg = tmsg;
-      recvInfo.msgLen = msgLen;
-      recvInfo.ip = sourceAdd.sin_addr.s_addr;
-      recvInfo.port = port;
-      recvInfo.shandle = pConn->shandle;
-      recvInfo.thandle = NULL;
-      recvInfo.chandle = pConn;
-      recvInfo.connType = 0;
-      (*(pConn->processData))(&recvInfo);
-      processedLen += msgLen;
-      msg += msgLen;
-      count++;
-    }
-    // tTrace("%s %d UDP packets are received together", pConn->label, count);
+    char *tmsg = malloc(dataLen + tsRpcOverhead);
+    if (NULL == tmsg) {
+      tError("%s failed to allocate memory, size:%d", pConn->label, dataLen);
+      continue;
+    }
+    tmsg += tsRpcOverhead;  // overhead for SRpcReqContext
+    memcpy(tmsg, msg, dataLen);
+    recvInfo.msg = tmsg;
+    recvInfo.msgLen = dataLen;
+    recvInfo.ip = sourceAdd.sin_addr.s_addr;
+    recvInfo.port = port;
+    recvInfo.shandle = pConn->shandle;
+    recvInfo.thandle = NULL;
+    recvInfo.chandle = pConn;
+    recvInfo.connType = 0;
+    (*(pConn->processData))(&recvInfo);
   }
   return NULL;
@@ -270,141 +208,17 @@ static void *taosRecvUdpData(void *param) {
 int taosSendUdpData(uint32_t ip, uint16_t port, void *data, int dataLen, void *chandle) {
   SUdpConn *pConn = (SUdpConn *)chandle;
-  SUdpBuf *pBuf;
   if (pConn == NULL || pConn->signature != pConn) return -1;
-  if (pConn->hash == NULL) {
-    struct sockaddr_in destAdd;
-    memset(&destAdd, 0, sizeof(destAdd));
-    destAdd.sin_family = AF_INET;
-    destAdd.sin_addr.s_addr = ip;
-    destAdd.sin_port = htons(port);
-    //tTrace("%s msg is sent to 0x%x:%hu len:%d ret:%d localPort:%hu chandle:0x%x", pConn->label, destAdd.sin_addr.s_addr,
-    //       port, dataLen, ret, pConn->localPort, chandle);
-    int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd));
-    return ret;
-  }
-  pthread_mutex_lock(&pConn->mutex);
-  pBuf = (SUdpBuf *)rpcGetIpHash(pConn->hash, ip, port);
-  if (pBuf == NULL) {
-    pBuf = taosCreateUdpBuf(pConn, ip, port);
-    rpcAddIpHash(pConn->hash, pBuf, ip, port);
-  }
-  if ((pBuf->totalLen + dataLen > RPC_MAX_UDP_SIZE) || (taosMsgHdrSize(pBuf->msgHdr) >= RPC_MAX_UDP_PKTS)) {
-    taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer);
-    taosSendMsgHdr(pBuf->msgHdr, pConn->fd);
-    pBuf->totalLen = 0;
-  }
-  taosSetMsgHdrData(pBuf->msgHdr, data, dataLen);
-  pBuf->totalLen += dataLen;
-  pthread_mutex_unlock(&pConn->mutex);
-  return dataLen;
+  struct sockaddr_in destAdd;
+  memset(&destAdd, 0, sizeof(destAdd));
+  destAdd.sin_family = AF_INET;
+  destAdd.sin_addr.s_addr = ip;
+  destAdd.sin_port = htons(port);
+  int ret = (int)sendto(pConn->fd, data, (size_t)dataLen, 0, (struct sockaddr *)&destAdd, sizeof(destAdd));
+  return ret;
 }
-void taosFreeMsgHdr(void *hdr) {
-  struct msghdr *msgHdr = (struct msghdr *)hdr;
-  free(msgHdr->msg_iov);
-}
-int taosMsgHdrSize(void *hdr) {
-  struct msghdr *msgHdr = (struct msghdr *)hdr;
-  return (int)msgHdr->msg_iovlen;
-}
-void taosSendMsgHdr(void *hdr, int fd) {
-  struct msghdr *msgHdr = (struct msghdr *)hdr;
-  sendmsg(fd, msgHdr, 0);
-  msgHdr->msg_iovlen = 0;
-}
-void taosInitMsgHdr(void **hdr, void *dest, int maxPkts) {
-  struct msghdr *msgHdr = (struct msghdr *)malloc(sizeof(struct msghdr));
-  memset(msgHdr, 0, sizeof(struct msghdr));
-  *hdr = msgHdr;
-  struct sockaddr_in *destAdd = (struct sockaddr_in *)dest;
-  msgHdr->msg_name = destAdd;
-  msgHdr->msg_namelen = sizeof(struct sockaddr_in);
-  int size = (int)sizeof(struct iovec) * maxPkts;
-  msgHdr->msg_iov = (struct iovec *)malloc((size_t)size);
-  memset(msgHdr->msg_iov, 0, (size_t)size);
-}
-void taosSetMsgHdrData(void *hdr, char *data, int dataLen) {
-  struct msghdr *msgHdr = (struct msghdr *)hdr;
-  msgHdr->msg_iov[msgHdr->msg_iovlen].iov_base = data;
-  msgHdr->msg_iov[msgHdr->msg_iovlen].iov_len = (size_t)dataLen;
-  msgHdr->msg_iovlen++;
-}
-void taosRemoveUdpBuf(SUdpBuf *pBuf) {
-  taosTmrStopA(&pBuf->timer);
-  rpcDeleteIpHash(pBuf->pConn->hash, pBuf->ip, pBuf->port);
-  // tTrace("%s UDP buffer to:0x%lld:%d is removed", pBuf->pConn->label,
-  // pBuf->ip, pBuf->port);
-  pBuf->signature = NULL;
-  taosFreeMsgHdr(pBuf->msgHdr);
-  free(pBuf);
-}
-void taosProcessUdpBufTimer(void *param, void *tmrId) {
-  SUdpBuf *pBuf = (SUdpBuf *)param;
-  if (pBuf->signature != param) return;
-  if (pBuf->timer != tmrId) return;
-  SUdpConn *pConn = pBuf->pConn;
-  pthread_mutex_lock(&pConn->mutex);
-  if (taosMsgHdrSize(pBuf->msgHdr) > 0) {
-    taosSendMsgHdr(pBuf->msgHdr, pConn->fd);
-    pBuf->totalLen = 0;
-    pBuf->emptyNum = 0;
-  } else {
-    pBuf->emptyNum++;
-    if (pBuf->emptyNum > 200) {
-      taosRemoveUdpBuf(pBuf);
-      pBuf = NULL;
-    }
-  }
-  pthread_mutex_unlock(&pConn->mutex);
-  if (pBuf) taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer);
-}
-static SUdpBuf *taosCreateUdpBuf(SUdpConn *pConn, uint32_t ip, uint16_t port) {
-  SUdpBuf *pBuf = (SUdpBuf *)malloc(sizeof(SUdpBuf));
-  memset(pBuf, 0, sizeof(SUdpBuf));
-  pBuf->ip = ip;
-  pBuf->port = port;
-  pBuf->pConn = pConn;
-  pBuf->destAdd.sin_family = AF_INET;
-  pBuf->destAdd.sin_addr.s_addr = ip;
-  pBuf->destAdd.sin_port = (uint16_t)htons(port);
-  taosInitMsgHdr(&(pBuf->msgHdr), &(pBuf->destAdd), RPC_MAX_UDP_PKTS);
-  pBuf->signature = pBuf;
-  taosTmrReset(taosProcessUdpBufTimer, RPC_UDP_BUF_TIME, pBuf, pConn->tmrCtrl, &pBuf->timer);
-  // tTrace("%s UDP buffer to:0x%lld:%d is created", pBuf->pConn->label,
-  // pBuf->ip, pBuf->port);
-  return pBuf;
-}

View File

View File

@@ -0,0 +1,129 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self):
        tdLog.debug("start to execute %s" % __file__)
        tdLog.info("prepare cluster")
        tdDnodes.stopAll()
        tdDnodes.deploy(1)
        tdDnodes.start(1)
        self.conn = taos.connect(config=tdDnodes.getSimCfgPath())
        tdSql.init(self.conn.cursor())
        tdSql.execute('reset query cache')
        tdSql.execute('create dnode 192.168.0.2')
        tdDnodes.deploy(2)
        tdDnodes.start(2)
        self.conn = taos.connect(config=tdDnodes.getSimCfgPath())
        tdSql.init(self.conn.cursor())
        tdSql.execute('reset query cache')
        tdSql.execute('create dnode 192.168.0.3')
        tdDnodes.deploy(3)
        tdDnodes.start(3)

    def run(self):
        tdSql.execute('create database db replica 3 days 7')
        tdSql.execute('use db')
        for tid in range(1, 11):
            tdSql.execute('create table tb%d(ts timestamp, i int)' % tid)
        tdLog.sleep(10)

        tdLog.info("================= step1")
        startTime = 1520000010000
        for rid in range(1, 11):
            for tid in range(1, 11):
                tdSql.execute(
                    'insert into tb%d values(%ld, %d)' %
                    (tid, startTime, rid))
            startTime += 1
        tdSql.query('select * from tb1')
        tdSql.checkRows(10)
        tdLog.sleep(5)

        tdLog.info("================= step2")
        tdSql.execute('alter database db replica 2')
        tdLog.sleep(10)

        tdLog.info("================= step3")
        for rid in range(1, 11):
            for tid in range(1, 11):
                tdSql.execute(
                    'insert into tb%d values(%ld, %d)' %
                    (tid, startTime, rid))
            startTime += 1
        tdSql.query('select * from tb1')
        tdSql.checkRows(20)
        tdLog.sleep(5)

        tdLog.info("================= step4")
        tdSql.execute('alter database db replica 1')
        tdLog.sleep(10)

        tdLog.info("================= step5")
        for rid in range(1, 11):
            for tid in range(1, 11):
                tdSql.execute(
                    'insert into tb%d values(%ld, %d)' %
                    (tid, startTime, rid))
            startTime += 1
        tdSql.query('select * from tb1')
        tdSql.checkRows(30)
        tdLog.sleep(5)

        tdLog.info("================= step6")
        tdSql.execute('alter database db replica 2')
        tdLog.sleep(10)

        tdLog.info("================= step7")
        for rid in range(1, 11):
            for tid in range(1, 11):
                tdSql.execute(
                    'insert into tb%d values(%ld, %d)' %
                    (tid, startTime, rid))
            startTime += 1
        tdSql.query('select * from tb1')
        tdSql.checkRows(40)
        tdLog.sleep(5)

        tdLog.info("================= step8")
        tdSql.execute('alter database db replica 3')
        tdLog.sleep(10)

        tdLog.info("================= step9")
        for rid in range(1, 11):
            for tid in range(1, 11):
                tdSql.execute(
                    'insert into tb%d values(%ld, %d)' %
                    (tid, startTime, rid))
            startTime += 1
        tdSql.query('select * from tb1')
        tdSql.checkRows(50)
        tdLog.sleep(5)

    def stop(self):
        tdSql.close()
        self.conn.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addCluster(__file__, TDTestCase())

View File

@@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.types = [
            "int",
            "bigint",
            "float",
            "double",
            "smallint",
            "tinyint",
            "binary(10)",
            "nchar(10)",
            "timestamp"]
        self.rowNum = 300
        self.ts = 1537146000000
        self.step = 1000
        self.sqlHead = "select count(*), count(c1) "
        self.sqlTail = " from stb"

    def addColumnAndCount(self):
        for colIdx in range(len(self.types)):
            tdSql.execute(
                "alter table stb add column c%d %s" %
                (colIdx + 2, self.types[colIdx]))
            self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2)
            tdSql.query(self.sqlHead + self.sqlTail)
            # count non-NULL values in each column
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
            for i in range(2, colIdx + 2):
                print("check1: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
            # insert more rows
            for k in range(self.rowNum):
                self.ts += self.step
                sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
                for j in range(colIdx + 1):
                    sql += ", %d" % (colIdx + 2)
                sql += ")"
                tdSql.execute(sql)
            # count non-NULL values in each column
            tdSql.query(self.sqlHead + self.sqlTail)
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
            for i in range(2, colIdx + 2):
                print("check2: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))

    def dropColumnAndCount(self):
        tdSql.query(self.sqlHead + self.sqlTail)
        res = []
        for i in range(len(self.types)):
            res.append(tdSql.getData(0, i + 2))
        print(res)

        for colIdx in range(len(self.types), 0, -1):
            tdSql.execute("alter table stb drop column c%d" % (colIdx + 2))
            # self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2)
            tdSql.query(self.sqlHead + self.sqlTail)
            # count non-NULL values in each column
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
            for i in range(2, colIdx + 2):
                print("check1: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
            # insert more rows
            for k in range(self.rowNum):
                self.ts += self.step
                sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
                for j in range(colIdx + 1):
                    sql += ", %d" % (colIdx + 2)
                sql += ")"
                tdSql.execute(sql)
            # count non-NULL values in each column
            tdSql.query(self.sqlHead + self.sqlTail)
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
            for i in range(2, colIdx + 2):
                print("check2: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))

    def run(self):
        # Setup params
        db = "db"
        # Create db
        tdSql.execute("drop database if exists %s" % (db))
        tdSql.execute("reset query cache")
        tdSql.execute("create database %s maxrows 200 maxtables 4" % (db))
        tdSql.execute("use %s" % (db))
        # Create a table with one colunm of int type and insert 300 rows
        tdLog.info("Create stb and tb")
        tdSql.execute("create table stb (ts timestamp, c1 int) tags (tg1 int)")
        tdSql.execute("create table tb using stb tags (0)")
        tdLog.info("Insert %d rows into tb" % (self.rowNum))
        for k in range(1, self.rowNum + 1):
            self.ts += self.step
            tdSql.execute("insert into tb values (%d, 1)" % (self.ts))
        # Alter tb and add a column of smallint type, then query tb to see if
        # all added column are NULL
        self.addColumnAndCount()
        tdDnodes.stop(1)
        time.sleep(5)
        tdDnodes.start(1)
        time.sleep(5)
        tdSql.query(self.sqlHead + self.sqlTail)
        for i in range(2, len(self.types) + 2):
            tdSql.checkData(0, i, self.rowNum * (len(self.types) + 2 - i))
        self.dropColumnAndCount()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


#tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.types = [
            "int",
            "bigint",
            "float",
            "double",
            "smallint",
            "tinyint",
            "binary(10)",
            "nchar(10)",
            "timestamp"]
        self.rowNum = 300
        self.ts = 1537146000000
        self.step = 1000
        self.sqlHead = "select count(*), count(c1) "
        self.sqlTail = " from tb"

    def addColumnAndCount(self):
        for colIdx in range(len(self.types)):
            tdSql.execute(
                "alter table tb add column c%d %s" %
                (colIdx + 2, self.types[colIdx]))
            self.sqlHead = self.sqlHead + ",count(c%d) " % (colIdx + 2)
            tdSql.query(self.sqlHead + self.sqlTail)
            # count non-NULL values in each column
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
            for i in range(2, colIdx + 2):
                print("check1: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
            # insert more rows
            for k in range(self.rowNum):
                self.ts += self.step
                sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
                for j in range(colIdx + 1):
                    sql += ", %d" % (colIdx + 2)
                sql += ")"
                tdSql.execute(sql)
            # count non-NULL values in each column
            tdSql.query(self.sqlHead + self.sqlTail)
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
            for i in range(2, colIdx + 2):
                print("check2: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))

    def dropColumnAndCount(self):
        tdSql.query(self.sqlHead + self.sqlTail)
        res = []
        for i in range(len(self.types)):
            res.append(tdSql.getData(0, i + 2))
        print(res)

        for colIdx in range(len(self.types), 0, -1):
            tdSql.execute("alter table tb drop column c%d" % (colIdx + 2))
            # self.sqlHead = self.sqlHead + ",count(c%d) " %(colIdx + 2)
            tdSql.query(self.sqlHead + self.sqlTail)
            # count non-NULL values in each column
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 1))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 1))
            for i in range(2, colIdx + 2):
                print("check1: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 2))
            # insert more rows
            for k in range(self.rowNum):
                self.ts += self.step
                sql = "insert into tb values (%d, %d" % (self.ts, colIdx + 2)
                for j in range(colIdx + 1):
                    sql += ", %d" % (colIdx + 2)
                sql += ")"
                tdSql.execute(sql)
            # count non-NULL values in each column
            tdSql.query(self.sqlHead + self.sqlTail)
            tdSql.checkData(0, 0, self.rowNum * (colIdx + 2))
            tdSql.checkData(0, 1, self.rowNum * (colIdx + 2))
            for i in range(2, colIdx + 2):
                print("check2: i=%d colIdx=%d" % (i, colIdx))
                tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))

    def run(self):
        # Setup params
        db = "db"
        # Create db
        tdSql.execute("drop database if exists %s" % (db))
        tdSql.execute("reset query cache")
        tdSql.execute("create database %s maxrows 200 maxtables 4" % (db))
        tdSql.execute("use %s" % (db))
        # Create a table with one colunm of int type and insert 300 rows
        tdLog.info("Create table tb")
        tdSql.execute("create table tb (ts timestamp, c1 int)")
        tdLog.info("Insert %d rows into tb" % (self.rowNum))
        for k in range(1, self.rowNum + 1):
            self.ts += self.step
            tdSql.execute("insert into tb values (%d, 1)" % (self.ts))
        # Alter tb and add a column of smallint type, then query tb to see if
        # all added column are NULL
        self.addColumnAndCount()
        tdDnodes.stop(1)
        time.sleep(5)
        tdDnodes.start(1)
        time.sleep(5)
        tdSql.query(self.sqlHead + self.sqlTail)
        for i in range(2, len(self.types) + 2):
            tdSql.checkData(0, i, self.rowNum * (len(self.types) + 2 - i))
        self.dropColumnAndCount()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


#tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -0,0 +1,77 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.prepare()
        tdSql.execute(
            'create table st (ts timestamp, v1 int, v2 int, v3 int, v4 int, v5 int) tags (t int)')

        totalTables = 100
        batchSize = 500
        totalBatch = 60
        tdLog.info(
            "create %d tables, insert %d rows per table" %
            (totalTables, batchSize * totalBatch))
        for t in range(0, totalTables):
            tdSql.execute('create table t%d using st tags(%d)' % (t, t))
            # 2019-06-10 00:00:00
            beginTs = 1560096000000
            interval = 10000
            for r in range(0, totalBatch):
                sql = 'insert into t%d values ' % (t)
                for b in range(0, batchSize):
                    ts = beginTs + (r * batchSize + b) * interval
                    sql += '(%d, 1, 2, 3, 4, 5)' % (ts)
                tdSql.execute(sql)
        tdLog.info("insert data finished")
        tdSql.execute('alter table st add column v6 int')
        tdLog.sleep(5)
        tdLog.info("alter table finished")

        tdSql.query("select count(*) from t50")
        tdSql.checkData(0, 0, (int)(batchSize * totalBatch))

        tdLog.info("insert")
        tdSql.execute(
            "insert into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)")
        tdLog.info("import")
        tdSql.execute(
            "import into t50 values ('2019-06-13 07:59:55.000', 1, 2, 3, 4, 5, 6)")

        tdLog.info("query")
        tdSql.query("select count(*) from t50")
        tdSql.checkData(0, 0, batchSize * totalBatch + 1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -19,6 +19,8 @@ python3 ./test.py $1 -f table/column_num.py
 python3 ./test.py $1 -f table/db_table.py
 python3 ./test.py $1 -f table/tablename-boundary.py
+# tag
+python3 ./test.py $1 -f tag_lite/filter.py
 python3 ./test.py $1 -f tag_lite/create-tags-boundary.py
 python3 ./test.py $1 -f dbmgmt/database-name-boundary.py
@@ -96,3 +98,4 @@ python3 ./test.py $1 -f user/pass_len.py
 # table
 #python3 ./test.py $1 -f table/del_stable.py

View File

@@ -0,0 +1,145 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class Test:
    def __init__(self):
        self.current_tb = ""
        self.last_tb = ""
        self.written = 0

    def create_table(self):
        tdLog.info("create a table")
        self.current_tb = "tb%d" % int(round(time.time() * 1000))
        tdLog.info("current table %s" % self.current_tb)

        if (self.current_tb == self.last_tb):
            return
        else:
            tdSql.execute(
                'create table %s (ts timestamp, speed int)' %
                self.current_tb)
            self.last_tb = self.current_tb
            self.written = 0

    def insert_data(self):
        tdLog.info("will insert data to table")
        if (self.current_tb == ""):
            tdLog.info("no table, create first")
            self.create_table()

        tdLog.info("insert data to table")
        insertRows = 10
        tdLog.info("insert %d rows to %s" % (insertRows, self.last_tb))
        for i in range(0, insertRows):
            ret = tdSql.execute(
                'insert into %s values (now + %dm, %d)' %
                (self.last_tb, i, i))
            self.written = self.written + 1

        tdLog.info("insert earlier data")
        tdSql.execute('insert into %s values (now - 5m , 10)' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 6m , 10)' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 7m , 10)' % self.last_tb)
        self.written = self.written + 1
        tdSql.execute('insert into %s values (now - 8m , 10)' % self.last_tb)
        self.written = self.written + 1

    def query_data(self):
        if (self.written > 0):
            tdLog.info("query data from table")
            tdSql.query("select * from %s" % self.last_tb)
            tdSql.checkRows(self.written)

    def create_stable(self):
        tdLog.info("create a super table")

    def restart_database(self):
        tdLog.info("restart database")
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)

    def force_restart(self):
        tdLog.info("force restart database")
        tdDnodes.forcestop(1)
        tdDnodes.start(1)
        tdLog.sleep(5)

    def drop_table(self):
        if (self.current_tb != ""):
            tdLog.info("drop current tb %s" % self.current_tb)
            tdSql.execute("drop table %s" % self.current_tb)
            self.current_tb = ""
            self.last_tb = ""
            self.written = 0

    def reset_query_cache(self):
        tdLog.info("reset query cache")
        tdSql.execute("reset query cache")
        tdLog.sleep(1)

    def reset_database(self):
        tdLog.info("reset database")
        tdDnodes.forcestop(1)
        tdDnodes.deploy(1)
        self.current_tb = ""
        self.last_tb = ""
        self.written = 0
        tdDnodes.start(1)
        tdSql.prepare()


class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        tdSql.prepare()
        test = Test()
        switch = {
            1: test.create_table,
            2: test.insert_data,
            3: test.query_data,
            4: test.create_stable,
            5: test.restart_database,
            6: test.force_restart,
            7: test.drop_table,
            8: test.reset_query_cache,
            9: test.reset_database,
        }
        for x in range(1, 100):
            r = random.randint(1, 9)
            tdLog.notice("iteration %d run func %d" % (x, r))
            switch.get(r, lambda: "ERROR")()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -1,4 +1,5 @@
 #!/bin/bash
+# insert
 python3 ./test.py $1 -f insert/basic.py
 python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f insert/int.py
@@ -24,6 +25,7 @@ python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f insert/multi.py
 python3 ./test.py $1 -s && sleep 1
+# table
 python3 ./test.py $1 -f table/column_name.py
 python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f table/column_num.py
@@ -31,6 +33,7 @@ python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f table/db_table.py
 python3 ./test.py $1 -s && sleep 1
+# import
 python3 ./test.py $1 -f import_merge/importDataLastSub.py
 python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f import_merge/importHead.py
@@ -43,3 +46,7 @@ python3 ./test.py $1 -f import_merge/importTail.py
 python3 ./test.py $1 -s && sleep 1
 python3 ./test.py $1 -f import_merge/importTRestart.py
 python3 ./test.py $1 -s && sleep 1
+#tag
+python3 ./test.py $1 -f tag_lite/filter.py
+python3 ./test.py $1 -s && sleep 1

View File

View File

@@ -0,0 +1,135 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
    def init(self, conn):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

    def run(self):
        self.ntables = 10
        self.rowsPerTable = 10
        self.startTime = 1520000010000

        tdDnodes.stop(1)
        tdDnodes.deploy(1)
        tdDnodes.start(1)
        tdLog.info("================= step0")
        tdSql.execute('reset query cache')
        tdLog.info("drop database db if exits")
        tdSql.execute('drop database if exists db')
        tdLog.info("================= step1")
        tdSql.execute('create database db maxtables 4')
        tdLog.sleep(5)
        tdSql.execute('use db')

        tdLog.info("================= step1")
        tdLog.info("create 1 super table")
        tdSql.execute('create table stb (ts timestamp, i int) \
            tags (tin int, tfl float, tbg bigint, tdo double, tbi binary(10), tbl bool)')

        tdLog.info("================= step2")
        tdLog.info("create %d tables" % self.ntables)
        for tid in range(1, self.ntables + 1):
            tdSql.execute(
                'create table tb%d using stb tags(%d,%f,%ld,%f,\'%s\',%d)' %
                (tid, tid % 3, 1.2 * tid, self.startTime + tid,
                 1.22 * tid, 't' + str(tid), tid % 2))
        tdLog.sleep(5)

        tdLog.info("================= step3")
        tdLog.info(
            "insert %d data in to each %d tables" %
            (self.rowsPerTable, self.ntables))
        for rid in range(1, self.rowsPerTable + 1):
            sqlcmd = ['insert into']
            for tid in range(1, self.ntables + 1):
                sqlcmd.append(
                    'tb%d values(%ld,%d)' %
                    (tid, self.startTime + rid, rid))
            tdSql.execute(" ".join(sqlcmd))
        tdSql.query('select count(*) from stb')
        tdSql.checkData(0, 0, self.rowsPerTable * self.ntables)

        tdLog.info("================= step4")
        tdLog.info("drop one tag")
        tdSql.execute('alter table stb drop tag tbi')
        tdLog.info("insert %d data in to each %d tables" % (2, self.ntables))
        for rid in range(self.rowsPerTable + 1, self.rowsPerTable + 3):
            sqlcmd = ['insert into']
            for tid in range(1, self.ntables + 1):
                sqlcmd.append(
                    'tb%d values(%ld,%d)' %
                    (tid, self.startTime + rid, rid))
            tdSql.execute(" ".join(sqlcmd))
        self.rowsPerTable += 2
        tdSql.query('select count(*) from stb')
        tdSql.checkData(0, 0, self.rowsPerTable * self.ntables)
        tdSql.query('describe tb1')
        tdSql.checkRows(2 + 5)

        tdLog.info("================= step5")
        tdLog.info("add one tag")
        tdSql.execute('alter table stb add tag tnc nchar(10)')
        for tid in range(1, self.ntables + 1):
            tdSql.execute('alter table tb%d set tag tnc=\"%s\"' %
                          (tid, str(tid * 1.2)))
        tdLog.info("insert %d data in to each %d tables" % (2, self.ntables))
        for rid in range(self.rowsPerTable + 1, self.rowsPerTable + 3):
            sqlcmd = ['insert into']
            for tid in range(1, self.ntables + 1):
                sqlcmd.append(
                    'tb%d values(%ld,%d)' %
                    (tid, self.startTime + rid, rid))
            tdSql.execute(" ".join(sqlcmd))
        self.rowsPerTable += 2
        tdSql.query('select count(*) from stb')
        tdSql.checkData(0, 0, self.rowsPerTable * self.ntables)
        tdSql.query('describe tb1')
        tdSql.checkRows(2 + 6)

        tdLog.info("================= step6")
        tdLog.info("group and filter by tag1 int")
        tdSql.query('select max(i) from stb where tbl=0 group by tin')
        tdSql.checkRows(3)
        tdSql.execute('reset query cache')
        tdSql.query('select max(i) from stb where tbl=true group by tin')
        tdSql.checkData(2, 0, self.rowsPerTable)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -0,0 +1,270 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def run(self):
tdSql.prepare()
#TSIM: system sh/stop_dnodes.sh
#TSIM: system sh/deploy.sh -n dnode1 -i 1
#TSIM: system sh/exec.sh -n dnode1 -s start
#TSIM:
#TSIM: sleep 3000
#TSIM: sql connect
#TSIM:
#TSIM: print ======================== dnode1 start
tdLog.info('======================== dnode1 start')
#TSIM:
dbPrefix = "ta_fi_db"
tbPrefix = "ta_fi_tb"
mtPrefix = "ta_fi_mt"
#TSIM: $tbNum = 10
rowNum = 20
#TSIM: $totalNum = 200
#TSIM:
#TSIM: print =============== step1
tdLog.info('=============== step1')
i = 0
#TSIM: $db = $dbPrefix . $i
mt = "%s%d" % (mtPrefix, i)
#TSIM:
#TSIM: sql create database $db
#TSIM: sql use $db
#TSIM: sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol binary(10))
tdLog.info("create table %s (ts timestamp, tbcol int) TAGS(tgcol binary(10))" % mt)
tdSql.execute('create table %s (ts timestamp, tbcol int) TAGS(tgcol binary(10))' % mt)
#TSIM:
i = 0
while (i < 5):
tb = "tbPrefix%d" % i
tdLog.info("create table %s using %s tags( '0' )" % (tb, mt))
tdSql.execute("create table %s using %s tags( '0' )" % (tb, mt))
x = 0
while (x < rowNum):
ms = "%dm" % x
tdLog.info("insert into %s values (now + %s , %d)" % (tb, ms, x))
tdSql.execute("insert into %s values (now + %s , %d)" % (tb, ms, x))
x = x + 1
i = i + 1
while (i < 10):
tb = "%s%d" % (tbPrefix , i)
#TSIM: sql create table $tb using $mt tags( '1' )
tdLog.info("create table %s using %s tags( '1' )" % (tb, mt))
tdSql.execute("create table %s using %s tags( '1' )" % (tb, mt))
x = 0
while (x < rowNum):
ms = "%dm" % x
#TSIM: sql insert into $tb values (now + $ms , $x )
tdLog.info("insert into %s values (now + %s, %d )" % (tb, ms, x))
tdSql.execute("insert into %s values (now + %s, %d )" % (tb, ms, x))
x = x + 1
i = i + 1
#TSIM:
#TSIM: print =============== step2
tdLog.info('=============== step2')
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tgcol = '1'
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = '1'" % mt)
tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tgcol = '1'" % mt)
#TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06
tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6)))
#TSIM: if $data00 != 100 then
tdLog.info('tdSql.checkData(0, 0, 100)')
tdSql.checkData(0, 0, 100)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tg = '1' -x step2
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tg = '1' -x step2" % mt)
tdSql.error("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tg = '1'" % mt)
#TSIM: return -1
#TSIM: step2:
#TSIM:
#TSIM: print =============== step3
tdLog.info('=============== step3')
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where noexist = '1' -x step3
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where noexist = '1' -x step3" % mt)
tdSql.error("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where noexist = '1'" % mt)
#TSIM: return -1
#TSIM: step3:
#TSIM:
#TSIM: print =============== step4
tdLog.info('=============== step4')
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1'
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tbcol = '1'" % mt)
tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s where tbcol = '1'" % mt)
#TSIM: if $rows != 1 then
tdLog.info('tdSql.checkRows(1)')
tdSql.checkRows(1)
#TSIM: return -1
#TSIM: endi
#TSIM: if $data00 != 10 then
tdLog.info('tdSql.checkData(0, 0, 10)')
tdSql.checkData(0, 0, 10)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== step5
tdLog.info('=============== step5')
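# no filter: all 10 tables * 20 rows = 200 rows are aggregated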
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt)
tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt)
#TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06
tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0,0), tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0,5 ), tdSql.getData(0, 6)))
#TSIM: if $data00 != 200 then
tdLog.info('tdSql.checkData(0, 0, 200)')
tdSql.checkData(0, 0, 200)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== step6
tdLog.info('=============== step6')
#TSIM: sql select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt -x step6
tdLog.info("select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s -x step6" % mt)
tdSql.error("select count(tbcol), avg(cc), sum(xx), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s" % mt)
#TSIM: return -1
#TSIM: step6:
#TSIM:
#TSIM: print =============== step7
tdLog.info('=============== step7')
#TSIM: sql select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from $mt -x step7
tdLog.info("select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from %s -x step7" % mt)
tdSql.error("select count(tgcol), avg(tgcol), sum(tgcol), min(tgcol), max(tgcol), first(tgcol), last(tgcol) from %s" % mt)
#TSIM: return -1
#TSIM: step7:
#TSIM:
#TSIM: print =============== step8
tdLog.info('=============== step8')
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tbcol
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s by tbcol" % mt)
tdSql.query("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tbcol" % mt)
#TSIM:
#TSIM: print =============== step9
tdLog.info('=============== step9')
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by noexist -x step9
tdLog.info("select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by noexist -x step9" % mt)
tdSql.error('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by noexist ' % mt)
#TSIM: return -1
#TSIM: step9:
#TSIM:
#TSIM: print =============== step10
tdLog.info('=============== step10')
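# group by tag: each tag value ('0' and '1') covers 5 tables * 20 rows = 100 rows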
#TSIM: sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt group by tgcol
tdLog.info('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % mt)
tdSql.query('select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from %s group by tgcol' % mt)
#TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06
tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6)))
#TSIM: if $data00 != 100 then
tdLog.info('tdSql.checkData(0, 0, 100)')
tdSql.checkData(0, 0, 100)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== step11
tdLog.info('=============== step11')
#TSIM: sql select count(tbcol) as c from $mt group by tbcol
tdLog.info('select count(tbcol) as c from %s group by tbcol' % mt)
tdSql.query('select count(tbcol) as c from %s group by tbcol' % mt)
#TSIM:
#TSIM: print =============== step12
tdLog.info('=============== step12')
#TSIM: sql select count(tbcol) as c from $mt group by noexist -x step12
tdLog.info('select count(tbcol) as c from %s group by noexist -x step12' % mt)
tdSql.error('select count(tbcol) as c from %s group by noexist' % mt)
#TSIM: return -1
#TSIM: step12:
#TSIM:
#TSIM: print =============== step13
tdLog.info('=============== step13')
#TSIM: sql select count(tbcol) as c from $mt group by tgcol
tdLog.info('select count(tbcol) as c from %s group by tgcol' % mt)
tdSql.query('select count(tbcol) as c from %s group by tgcol' % mt)
#TSIM: print $data00
tdLog.info("%s" % tdSql.getData(0, 0))
#TSIM: if $data00 != 100 then
tdLog.info('tdSql.checkData(0, 0, 100)')
tdSql.checkData(0, 0, 100)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== step14
tdLog.info('=============== step14')
#TSIM: sql select count(tbcol) as c from $mt where ts > 1000 group by tgcol
tdLog.info('select count(tbcol) as c from %s where ts > 1000 group by tgcol' % mt)
tdSql.query('select count(tbcol) as c from %s where ts > 1000 group by tgcol' % mt)
#TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06
# tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6)))
#TSIM: if $data00 != 100 then
tdLog.info('tdSql.checkData(0, 0, 100)')
tdSql.checkData(0, 0, 100)
#TSIM: print expect 100, actual $data00
tdLog.info("expect 100, actual %s" % tdSql.getData(0, 0))
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== step15
tdLog.info('=============== step15')
#TSIM: sql select count(tbcol) as c from $mt where noexist < 1 group by tgcol -x step15
tdLog.info('select count(tbcol) as c from %s where noexist < 1 group by tgcol -x step15' % mt)
tdSql.error('select count(tbcol) as c from %s where noexist < 1 group by tgcol' % mt)
#TSIM: return -1
#TSIM: step15:
#TSIM:
#TSIM: print =============== step16
tdLog.info('=============== step16')
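# a tag filter combined with group by on the same tag yields a single group of 100 rows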
#TSIM: sql select count(tbcol) as c from $mt where tgcol = '1' group by tgcol
tdLog.info("select count(tbcol) as c from %s where tgcol = '1' group by tgcol" % mt)
tdSql.query("select count(tbcol) as c from %s where tgcol = '1' group by tgcol" % mt)
#TSIM: print $data00 $data01 $data02 $data03 $data04 $data05 $data06
# tdLog.info("%s %s %s %s %s %s %s" % (tdSql.getData(0, 0), tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4), tdSql.getData(0, 5), tdSql.getData(0, 6)))
#TSIM: if $data00 != 100 then
tdLog.info('tdSql.checkData(0, 0, 100)')
tdSql.checkData(0, 0, 100)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: print =============== clear
tdLog.info('=============== clear')
#TSIM: sql drop database $db
tdLog.info('drop database db')
tdSql.execute('drop database db')
#TSIM: sql show databases
tdLog.info('show databases')
tdSql.query('show databases')
#TSIM: if $rows != 0 then
tdLog.info('tdSql.checkRows(0)')
tdSql.checkRows(0)
#TSIM: return -1
#TSIM: endi
#TSIM:
#TSIM: system sh/exec.sh -n dnode1 -s stop -x SIGINT
# convert end
def stop(self):
    tdSql.close()
    tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@@ -49,10 +49,10 @@ class TDSql:
callerModule = inspect.getmodule(frame[0]) callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__ callerFilename = callerModule.__file__
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, expect error not occured" % "%s failed: sql:%s, expect error not occured" %
(callerFilename, sql)) (callerFilename, sql))
else: else:
tdLog.info("sql:%.40s, expect error occured" % (sql)) tdLog.info("sql:%s, expect error occured" % (sql))
def query(self, sql): def query(self, sql):
self.sql = sql self.sql = sql
@@ -72,9 +72,9 @@ class TDSql:
callerModule = inspect.getmodule(frame[0]) callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__ callerFilename = callerModule.__file__
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, queryRows:%d != expect:%d" % "%s failed: sql:%s, queryRows:%d != expect:%d" %
(callerFilename, self.sql, self.queryRows, expectRows)) (callerFilename, self.sql, self.queryRows, expectRows))
tdLog.info("sql:%.40s, queryRows:%d == expect:%d" % tdLog.info("sql:%s, queryRows:%d == expect:%d" %
(self.sql, self.queryRows, expectRows)) (self.sql, self.queryRows, expectRows))
def checkData(self, row, col, data): def checkData(self, row, col, data):
@@ -84,35 +84,35 @@ class TDSql:
if row < 0: if row < 0:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, row:%d is smaller than zero" % "%s failed: sql:%s, row:%d is smaller than zero" %
(callerFilename, self.sql, row)) (callerFilename, self.sql, row))
if col < 0: if col < 0:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, col:%d is smaller than zero" % "%s failed: sql:%s, col:%d is smaller than zero" %
(callerFilename, self.sql, col)) (callerFilename, self.sql, col))
if row >= self.queryRows: if row > self.queryRows:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, row:%d is larger than queryRows:%d" % "%s failed: sql:%s, row:%d is larger than queryRows:%d" %
(callerFilename, self.sql, row, self.queryRows)) (callerFilename, self.sql, row, self.queryRows))
if col >= self.queryCols: if col > self.queryCols:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, col:%d is larger than queryRows:%d" % "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
(callerFilename, self.sql, col, self.queryCols)) (callerFilename, self.sql, col, self.queryCols))
if self.queryResult[row][col] != data: if self.queryResult[row][col] != data:
tdLog.exit("%s failed: sql:%.40s row:%d col:%d data:%s != expect:%s" % ( tdLog.exit("%s failed: sql:%s row:%d col:%d data:%s != expect:%s" % (
callerFilename, self.sql, row, col, self.queryResult[row][col], data)) callerFilename, self.sql, row, col, self.queryResult[row][col], data))
if data is None: if data is None:
tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
elif isinstance(data, str): elif isinstance(data, str):
tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
elif isinstance(data, datetime.date): elif isinstance(data, datetime.date):
tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
else: else:
tdLog.info("sql:%.40s, row:%d col:%d data:%s == expect:%d" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(self.sql, row, col, self.queryResult[row][col], data)) (self.sql, row, col, self.queryResult[row][col], data))
def getData(self, row, col): def getData(self, row, col):
@@ -122,19 +122,19 @@ class TDSql:
if row < 0: if row < 0:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, row:%d is smaller than zero" % "%s failed: sql:%s, row:%d is smaller than zero" %
(callerFilename, self.sql, row)) (callerFilename, self.sql, row))
if col < 0: if col < 0:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, col:%d is smaller than zero" % "%s failed: sql:%s, col:%d is smaller than zero" %
(callerFilename, self.sql, col)) (callerFilename, self.sql, col))
if row >= self.queryRows: if row > self.queryRows:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, row:%d is larger than queryRows:%d" % "%s failed: sql:%s, row:%d is larger than queryRows:%d" %
(callerFilename, self.sql, row, self.queryRows)) (callerFilename, self.sql, row, self.queryRows))
if col >= self.queryCols: if col > self.queryCols:
tdLog.exit( tdLog.exit(
"%s failed: sql:%.40s, col:%d is larger than queryRows:%d" % "%s failed: sql:%s, col:%d is larger than queryCols:%d" %
(callerFilename, self.sql, col, self.queryCols)) (callerFilename, self.sql, col, self.queryCols))
return self.queryResult[row][col] return self.queryResult[row][col]
@@ -157,9 +157,9 @@ class TDSql:
callerModule = inspect.getmodule(frame[0]) callerModule = inspect.getmodule(frame[0])
callerFilename = callerModule.__file__ callerFilename = callerModule.__file__
tdLog.exit("%s failed: sql:%.40s, affectedRows:%d != expect:%d" % ( tdLog.exit("%s failed: sql:%s, affectedRows:%d != expect:%d" % (
callerFilename, self.sql, self.affectedRows, expectAffectedRows)) callerFilename, self.sql, self.affectedRows, expectAffectedRows))
tdLog.info("sql:%.40s, affectedRows:%d == expect:%d" % tdLog.info("sql:%s, affectedRows:%d == expect:%d" %
(self.sql, self.affectedRows, expectAffectedRows)) (self.sql, self.affectedRows, expectAffectedRows))

View File

@@ -1,4 +1,5 @@
#!/bin/bash #!/bin/bash
# insert
python3 ./test.py -g -f insert/basic.py python3 ./test.py -g -f insert/basic.py
python3 ./test.py -g -s && sleep 1 python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/int.py python3 ./test.py -g -f insert/int.py
@@ -24,6 +25,7 @@ python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/multi.py python3 ./test.py -g -f insert/multi.py
python3 ./test.py -g -s && sleep 1 python3 ./test.py -g -s && sleep 1
# table
python3 ./test.py -g -f table/column_name.py python3 ./test.py -g -f table/column_name.py
python3 ./test.py -g -s && sleep 1 python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/column_num.py python3 ./test.py -g -f table/column_num.py
@@ -31,5 +33,10 @@ python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/db_table.py python3 ./test.py -g -f table/db_table.py
python3 ./test.py -g -s && sleep 1 python3 ./test.py -g -s && sleep 1
# import
python3 ./test.py -g -f import_merge/importDataLastSub.py python3 ./test.py -g -f import_merge/importDataLastSub.py
python3 ./test.py -g -s && sleep 1 python3 ./test.py -g -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1

View File

@@ -43,9 +43,6 @@ endi
if $data01 != 4 then if $data01 != 4 then
return -1 return -1
endi endi
if $data02 != ready then
return -1
endi
print =============== drop table print =============== drop table
sql drop table d1.t1 sql drop table d1.t1
@@ -82,9 +79,6 @@ endi
if $data01 != 3 then if $data01 != 3 then
return -1 return -1
endi endi
if $data02 != ready then
return -1
endi
print =============== drop all table print =============== drop all table
sql drop table d1.t2 sql drop table d1.t2

View File

@@ -43,9 +43,6 @@ endi
if $data01 != 4 then if $data01 != 4 then
return -1 return -1
endi endi
if $data02 != ready then
return -1
endi
print =============== drop database print =============== drop database
sql drop database d1 sql drop database d1

View File

@@ -34,13 +34,12 @@ if $rows != 0 then
endi endi
print =============== step2 print =============== step2
sql create table $tb (ts timestamp, speed bigint, v1 binary(1500), v2 binary(1500), v3 binary(1500), v4 binary(500), v5 binary(500)) -x step2 sql create table $tb (ts timestamp, speed bigint, v1 binary(1500), v2 binary(1500), v3 binary(1500), v4 binary(500), v5 binary(500))
return -1
step2:
sql show tables sql show tables
if $rows != 0 then if $rows != 1 then
return -1 return -1
endi endi
sql drop table $tb
print =============== step3 print =============== step3
sql create table $tb (ts timestamp, speed float, v1 binary(100), v2 binary(100), v3 binary(100), v4 binary(100), v5 binary(100)) sql create table $tb (ts timestamp, speed float, v1 binary(100), v2 binary(100), v3 binary(100), v4 binary(100), v5 binary(100))

View File

@@ -96,10 +96,10 @@ echo "second ${HOSTNAME}:7200" >> $TAOS_CFG
echo "serverPort ${NODE}" >> $TAOS_CFG echo "serverPort ${NODE}" >> $TAOS_CFG
echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG
echo "logDir $LOG_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG
echo "dDebugFlag 199" >> $TAOS_CFG echo "dDebugFlag 135" >> $TAOS_CFG
echo "mDebugFlag 199" >> $TAOS_CFG echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 199" >> $TAOS_CFG echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 151" >> $TAOS_CFG echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG
echo "httpDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 135" >> $TAOS_CFG

View File

@@ -1,19 +1,18 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
$totalVnodes = 100 $totalVnodes = 100
$minVnodes = 48 $minVnodes = 50
$maxVnodes = 52 $maxVnodes = 50
$maxTables = 4 $maxTables = 4
$totalRows = $totalVnodes * $maxTables $totalRows = $totalVnodes * $maxTables
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0 system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes
system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c maxShellConns -v 100000 system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v $totalVnodes
system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 100000
print ========== prepare data print ========== prepare data
system sh/exec_up.sh -n dnode1 -s start system sh/exec_up.sh -n dnode1 -s start
@@ -44,16 +43,7 @@ if $data00 != $totalRows then
return -1 return -1
endi endi
system sh/deploy.sh -n dnode2 -i 2 print ========== step3
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 100
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode2 -c maxVnodeConnections -v 100000
system sh/cfg.sh -n dnode2 -c maxMeterConnections -v 100000
system sh/cfg.sh -n dnode2 -c maxShellConns -v 100000
system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 100000
print ========== step2
sql create dnode $hostname2 sql create dnode $hostname2
system sh/exec_up.sh -n dnode2 -s start system sh/exec_up.sh -n dnode2 -s start
@@ -86,6 +76,8 @@ if $data00 != $totalRows then
return -1 return -1
endi endi
return
system sh/exec_up.sh -n dnode1 -s stop -x SIGINT system sh/exec_up.sh -n dnode1 -s stop -x SIGINT
system sh/exec_up.sh -n dnode2 -s stop -x SIGINT system sh/exec_up.sh -n dnode2 -s stop -x SIGINT
system sh/exec_up.sh -n dnode3 -s stop -x SIGINT system sh/exec_up.sh -n dnode3 -s stop -x SIGINT