Merge branch 'feature/pyconn' of https://github.com/taosdata/TDengine into feature/pyconn

commit d7eafe9c32

@@ -63,7 +63,7 @@ matrix:
pkill -TERM -x taosd
fuser -k -n tcp 6030
sleep 1
./crash_gen.sh -a -p -t 4 -s 25|| travis_terminate $?
./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
sleep 1

cd ${TRAVIS_BUILD_DIR}/tests/pytest
@@ -15,28 +15,15 @@
# arbitrator arbitrator_hostname:6030

# the full-qualified domain name (FQDN) of dnode
# fqdn hostname:6030
# fqdn hostname

# port for MNode connect to Client, default udp[6030-6055] tcp[6030]
# port for MNode connect to Client, default udp/tcp [6030-6040]
# serverPort 6030

# http service port, default tcp[6020]
# http service port, default tcp [6020]
# httpPort 6020

# set socket type ("udp" and "tcp")
# the server and client should have the same socket type. Otherwise, connect will fail
# sockettype udp

# for the cluster version, data file's directory is configured this way
# option mount_path tier_level
# dataDir /mnt/disk1/taos 0
# dataDir /mnt/disk2/taos 0
# dataDir /mnt/disk3/taos 0
# dataDir /mnt/disk4/taos 0
# dataDir /mnt/disk5/taos 0
# dataDir /mnt/disk6/taos 1
# dataDir /mnt/disk7/taos 1
# for the stand-alone version, data file's directory is configured this way
# data file's directory
# dataDir /var/lib/taos

# log file's directory
@@ -46,12 +33,12 @@
# numOfMnodes 3

# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
# alternativeRole 0
# role 0

# number of threads per CPU core
# numOfThreadsPerCore 1.0

# number of vgroups per db
# max number of vgroups per db
# maxVgroupsPerDb 0

# max number of tables per vnode

@@ -60,9 +47,6 @@
# the ratio of threads responsible for querying in the total thread
# ratioOfQueryThreads 0.5

# interval of check load balance when the management node is in normal operation
# balanceInterval 300

# interval of DNode report status to MNode, unit is Second, for cluster version only
# statusInterval 1

@@ -88,7 +72,7 @@
# minIntervalTime 10

# max length of an SQL
# maxSQLLength 65380
# maxSQLLength 65480

# Support the maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000

@@ -153,11 +137,8 @@
# Stop writing data when the disk size of the log folder is less than this value
# minimalDataDirGB 0.1

# mnode take into account while balance, for cluster version only
# mnodeEqualVnodeNum 4

# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 864000
# offlineThreshold 8640000

# start http service
# http 1

@@ -165,7 +146,7 @@
# start system monitor module
# monitor 1

# start http service
# start muqq service
# mqtt 0

# mqtt uri

@@ -201,19 +182,22 @@
# 131: output warning and error,135: output info, warning and error to log.
# 199: output debug, info, warning and error to both screen and file

# debug flag for basic utils
# debugFlag 131
# debug flag for all log type, take effect when non-zero value
# debugFlag 0

# debug flag for meta management messages
# mDebugFlag 135

# debug flag for dnode messages
# dDebugFlag 131
# dDebugFlag 135

# debug flag for TDengine SDB
# debug flag for sync module
# sDebugFlag 135

# debug flag for TDengine SDB
# debug flag for WAL
# wDebugFlag 135

# debug flag for SDB
# sdbDebugFlag 135

# debug flag for RPC

@@ -246,6 +230,9 @@
# debug flag for query
# qDebugflag 131

# debug flag for vnode
# vDebugflag 131

# debug flag for http server
# tsdbDebugFlag 131
@@ -358,7 +358,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}

case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values";
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
const char* msg3 = "invalid dnode ep";

/* validate the ip address */
@@ -4674,26 +4674,42 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}

const int DNODE_DYNAMIC_CFG_OPTIONS_SIZE = 19;
const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[] = {
{"resetLog", 8}, {"resetQueryCache", 15}, {"debugFlag", 9}, {"mDebugFlag", 10},
{"dDebugFlag", 10}, {"sdbDebugFlag", 12}, {"vDebugFlag", 10}, {"cDebugFlag", 10},
{"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"rpcDebugFlag", 12}, {"uDebugFlag", 10},
{"tmrDebugFlag", 12}, {"qDebugflag", 10}, {"sDebugflag", 10}, {"tsdbDebugFlag", 13},
{"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"monitor", 7}};
const int tokenLogEnd = 2;
const int tokenBalance = 2;
const int tokenMonitor = 3;
const int tokenDebugFlag = 4;
const int tokenDebugFlagEnd = 20;
const SDNodeDynConfOption cfgOptions[] = {
{"resetLog", 8}, {"resetQueryCache", 15}, {"balance", 7}, {"monitor", 7},
{"debugFlag", 9}, {"monitorDebugFlag", 16}, {"vDebugFlag", 10}, {"mDebugFlag", 10},
{"cDebugFlag", 10}, {"httpDebugFlag", 13}, {"qDebugflag", 10}, {"sdbDebugFlag", 12},
{"uDebugFlag", 10}, {"tsdbDebugFlag", 13}, {"sDebugflag", 10}, {"rpcDebugFlag", 12},
{"dDebugFlag", 10}, {"mqttDebugFlag", 13}, {"wDebugFlag", 10}, {"tmrDebugFlag", 12},
};

SSQLToken* pOptionToken = &pOptions->a[1];

if (pOptions->nTokens == 2) {
// reset log and reset query cache does not need value
for (int32_t i = 0; i < 2; ++i) {
const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
for (int32_t i = 0; i < tokenLogEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
return TSDB_CODE_SUCCESS;
}
}
} else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].name, pOptionToken->z, pOptionToken->n) == 0) &&
(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].len == pOptionToken->n)) {
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
if (!parseOk) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenMonitor].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {

@@ -4709,8 +4725,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}

for (int32_t i = 2; i < DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1; ++i) {
const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];

if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
/* options is valid */
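The rewritten validateDNodeConfig partitions the cfgOptions table by index: entries before tokenLogEnd take no value, the entries at tokenBalance and tokenMonitor get special handling, and the range [tokenDebugFlag, tokenDebugFlagEnd) holds the numeric debug-flag options. The following standalone sketch is not part of the commit; it only illustrates the name-plus-length matching the hunk uses, with made-up type and helper names:

#include <stdio.h>
#include <strings.h>

/* Illustrative stand-ins for the option-table entries shown in the diff. */
typedef struct { const char *name; int len; } DynCfgOption;

static const DynCfgOption kOptions[] = {
    {"resetLog", 8}, {"resetQueryCache", 15}, {"balance", 7}, {"monitor", 7},
    {"debugFlag", 9}, {"monitorDebugFlag", 16},
};
static const int kTokenLogEnd = 2;   /* entries before this index take no value */

/* Returns 1 if tok (length n) names a value-less option, mirroring the
   strncasecmp + length check used in the hunk above. */
static int isValuelessOption(const char *tok, int n) {
  for (int i = 0; i < kTokenLogEnd; ++i) {
    const DynCfgOption *opt = &kOptions[i];
    if (strncasecmp(opt->name, tok, n) == 0 && opt->len == n) return 1;
  }
  return 0;
}

int main(void) {
  /* msg2 in the first hunk lists the accepted forms:
     resetlog, debugFlag 135, balance 'vnode:2-dnode:2', monitor 1 */
  printf("%d\n", isValuelessOption("resetlog", 8));  /* 1: matches resetLog, needs no value */
  printf("%d\n", isValuelessOption("balance", 7));   /* 0: balance sits outside the value-less range */
  return 0;
}

The paired strncasecmp-plus-length comparison is what keeps a shorter token such as "monitorDebug" from accidentally matching "monitorDebugFlag".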
@@ -106,13 +106,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SCMCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;

taosCorBeginWrite(&pVgroupInfo->version);
//TODO(dengyihao), dont care vgid
tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse);
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; pVgroupInfo->numOfEps; i++) {
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
strncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
taosCorEndWrite(&pVgroupInfo->version);
}
void tscPrintMgmtEp() {
@@ -283,9 +284,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}

if (pEpSet) {
//SRpcEpSet dump;
tscEpSetHtons(pEpSet);
if (tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if(pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
} else {
@@ -256,11 +256,12 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
size_t numOfTables = taosArrayGetSize(tables);

SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
SSubscriptionProgress p = { .uid = tt->uid };
p.key = tscGetSubscriptionProgress(pSub, tt->uid, INT64_MIN);
p.key = tscGetSubscriptionProgress(pSub, tt->uid, pQueryInfo->window.skey);
taosArrayPush(progress, &p);
}
taosArraySort(progress, tscCompareSubscriptionProgress);
@@ -174,6 +174,7 @@ bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);

#ifdef __cplusplus
}
@@ -318,7 +318,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
pCols->maxPoints = maxRows;
pCols->bufSize = maxRowSize * maxRows;

pCols->buf = calloc(1, pCols->bufSize);
pCols->buf = malloc(pCols->bufSize);
if (pCols->buf == NULL) {
free(pCols);
return NULL;
@@ -198,6 +198,7 @@ int32_t tsdbDebugFlag = 131;

int32_t (*monitorStartSystemFp)() = NULL;
void (*monitorStopSystemFp)() = NULL;
void (*monitorExecuteSQLFp)(char *sql) = NULL;

static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
@@ -252,11 +253,15 @@ bool taosCfgDynamicOptions(char *msg) {
if (monitorStartSystemFp) {
(*monitorStartSystemFp)();
uInfo("monitor is enabled");
} else {
uError("monitor can't be updated, for monitor not initialized");
}
} else {
if (monitorStopSystemFp) {
(*monitorStopSystemFp)();
uInfo("monitor is disabled");
} else {
uError("monitor can't be updated, for monitor not initialized");
}
}
return true;

@@ -276,7 +281,12 @@ bool taosCfgDynamicOptions(char *msg) {
}

if (strncasecmp(option, "resetQueryCache", 15) == 0) {
uError("reset query cache can't be executed, for monitor not initialized");
if (monitorExecuteSQLFp) {
(*monitorExecuteSQLFp)("resetQueryCache");
uInfo("resetquerycache is executed");
} else {
uError("resetquerycache can't be executed, for monitor not started");
}
}

return false;
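The two hunks above work through optional-module hooks: monitorStartSystemFp, monitorStopSystemFp and monitorExecuteSQLFp stay NULL unless the monitor module registers itself, so taosCfgDynamicOptions tests each pointer before dispatching. A minimal, self-contained sketch of that pattern, using illustrative names rather than the real TDengine hooks:

#include <stdio.h>
#include <strings.h>

/* Hook set by an optional module at startup; NULL when that module is absent. */
static void (*executeSQLFp)(char *sql) = NULL;

static void demoExecuteSQL(char *sql) { printf("executing: %s\n", sql); }

/* Mirrors the guard in the hunk: only dispatch when the hook is registered. */
static int handleResetQueryCache(const char *option) {
  if (strncasecmp(option, "resetQueryCache", 15) != 0) return 0;
  if (executeSQLFp) {
    (*executeSQLFp)("resetQueryCache");
  } else {
    printf("resetQueryCache skipped: monitor module not started\n");
  }
  return 1;
}

int main(void) {
  handleResetQueryCache("resetQueryCache");  /* hook not registered yet */
  executeSQLFp = demoExecuteSQL;             /* in TDengine the monitor module does this registration */
  handleResetQueryCache("resetQueryCache");  /* now dispatched */
  return 0;
}

The same guard appears in the monitor on/off branch above, so a build without the monitor module still parses the option and only logs that it cannot act on it.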
@@ -1300,3 +1310,32 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {

return 0;
}

/*
 * alter dnode 1 balance "vnode:1-dnode:2"
 */

bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
int len = strlen(option);
if (strncasecmp(option, "vnode:", 6) != 0) {
return false;
}

int pos = 0;
for (; pos < len; ++pos) {
if (option[pos] == '-') break;
}

if (++pos >= len) return false;
if (strncasecmp(option + pos, "dnode:", 6) != 0) {
return false;
}

*vnodeId = strtol(option + 6, NULL, 10);
*dnodeId = strtol(option + pos + 6, NULL, 10);
if (*vnodeId <= 1 || *dnodeId <= 0) {
return false;
}

return true;
}
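taosCheckBalanceCfgOptions splits a value of the form vnode:<id>-dnode:<id> into its two ids. The driver below is not part of the commit; it inlines a copy of the parser so it can run standalone. Note that the range check, as written, rejects vnodeId values of 1 or less, so the "vnode:1-dnode:2" string from the comment above would not pass this copy either:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Local copy of the parser added above, for illustration only. */
static bool checkBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
  int len = strlen(option);
  if (strncasecmp(option, "vnode:", 6) != 0) return false;

  int pos = 0;
  for (; pos < len; ++pos) {
    if (option[pos] == '-') break;
  }

  if (++pos >= len) return false;
  if (strncasecmp(option + pos, "dnode:", 6) != 0) return false;

  *vnodeId = strtol(option + 6, NULL, 10);
  *dnodeId = strtol(option + pos + 6, NULL, 10);
  if (*vnodeId <= 1 || *dnodeId <= 0) return false;
  return true;
}

int main(void) {
  int32_t vnodeId = 0, dnodeId = 0;
  /* The value string of: alter dnode 1 balance "vnode:2-dnode:3" */
  if (checkBalanceCfgOptions("vnode:2-dnode:3", &vnodeId, &dnodeId)) {
    printf("move vnode %d to dnode %d\n", vnodeId, dnodeId);  /* vnode 2, dnode 3 */
  }
  printf("%d\n", checkBalanceCfgOptions("vnode:1-dnode:2", &vnodeId, &dnodeId));  /* 0: vnodeId must be > 1 */
  return 0;
}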
@@ -1,6 +1,8 @@
package com.taosdata.jdbc.utils;

import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;
import java.util.*;
import java.util.concurrent.TimeUnit;

@@ -31,6 +33,10 @@ public class TDNode {
this.testCluster = testCluster;
}

public void setRunning(int running) {
this.running = running;
}

public void searchTaosd(File dir, ArrayList<String> taosdPath) {
File[] fileList = dir.listFiles();
@@ -102,15 +108,46 @@ public class TDNode {
this.running = 1;
}

public Integer getTaosdPid() {
String cmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'";
String[] cmds = {"sh", "-c", cmd};
try {
Process process = Runtime.getRuntime().exec(cmds);
BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));
String line = null;
Integer res = null;
while((line = reader.readLine()) != null) {
if(!line.isEmpty()) {
res = Integer.valueOf(line);
break;
}
}

return res;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}

public void stop() {
String toBeKilled = "taosd";

if (this.running != 0) {
String killCmd = "pkill -kill -x " + toBeKilled;
Integer pid = null;
while((pid = getTaosdPid()) != null) {

String killCmd = "kill -term " + pid;
String[] killCmds = {"sh", "-c", killCmd};
try {
Runtime.getRuntime().exec(killCmds).waitFor();

TimeUnit.SECONDS.sleep(2);
} catch (Exception e) {
e.printStackTrace();
}
}

try {
for(int port = 6030; port < 6041; port ++) {
String fuserCmd = "fuser -k -n tcp " + port;
Runtime.getRuntime().exec(fuserCmd).waitFor();

@@ -120,7 +157,7 @@ public class TDNode {
}

this.running = 0;
System.out.println("dnode:" + this.index + " is stopped by pkill");
System.out.println("dnode:" + this.index + " is stopped by kill -term");
}
}
@ -14,33 +14,6 @@ public class TDNodes {
|
|||
}
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
try {
|
||||
String killCmd = "pkill -kill -x taosd";
|
||||
String[] killCmds = {"sh", "-c", killCmd};
|
||||
Runtime.getRuntime().exec(killCmds).waitFor();
|
||||
|
||||
String binPath = System.getProperty("user.dir");
|
||||
binPath += "/../../../debug";
|
||||
System.out.println("binPath: " + binPath);
|
||||
|
||||
File file = new File(path);
|
||||
binPath = file.getCanonicalPath();
|
||||
System.out.println("binPath real path: " + binPath);
|
||||
|
||||
if(path.isEmpty()){
|
||||
file = new File(path + "/../../");
|
||||
path = file.getCanonicalPath();
|
||||
}
|
||||
|
||||
for(int i = 0; i < tdNodes.size(); i++) {
|
||||
tdNodes.get(i).setPath(path);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
public void setTestCluster(boolean testCluster) {
|
||||
this.testCluster = testCluster;
|
||||
}
|
||||
|
@ -71,6 +44,11 @@ public class TDNodes {
|
|||
tdNodes.get(index - 1).setCfgConfig(option, value);
|
||||
}
|
||||
|
||||
public TDNode getTDNode(int index) {
|
||||
check(index);
|
||||
return tdNodes.get(index - 1);
|
||||
}
|
||||
|
||||
public void start(int index) {
|
||||
check(index);
|
||||
tdNodes.get(index - 1).start();
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.taosdata.jdbc;
|
||||
|
||||
import java.io.File;
|
||||
import com.taosdata.jdbc.utils.TDNodes;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
|
@ -9,30 +8,29 @@ import org.junit.BeforeClass;
|
|||
public class BaseTest {
|
||||
|
||||
private static boolean testCluster = false;
|
||||
private static String deployPath = System.getProperty("user.dir");
|
||||
private static TDNodes tdNodes = new TDNodes();
|
||||
private static TDNodes nodes = new TDNodes();
|
||||
|
||||
|
||||
@BeforeClass
|
||||
public static void setupEnv() {
|
||||
try{
|
||||
File file = new File(deployPath + "/../../../");
|
||||
String rootPath = file.getCanonicalPath();
|
||||
if (nodes.getTDNode(1).getTaosdPid() != null) {
|
||||
System.out.println("Kill taosd before running JDBC test");
|
||||
nodes.getTDNode(1).setRunning(1);
|
||||
nodes.stop(1);
|
||||
}
|
||||
|
||||
tdNodes.setPath(rootPath);
|
||||
tdNodes.setTestCluster(testCluster);
|
||||
|
||||
tdNodes.deploy(1);
|
||||
tdNodes.start(1);
|
||||
nodes.setTestCluster(testCluster);
|
||||
nodes.deploy(1);
|
||||
nodes.start(1);
|
||||
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
System.out.println("Base Test Exception");
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void cleanUpEnv() {
|
||||
tdNodes.stop(1);
|
||||
nodes.stop(1);
|
||||
}
|
||||
}
|
|
@ -492,6 +492,7 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
|
|||
pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
|
||||
pCfg->moduleStatus = htonl(pCfg->moduleStatus);
|
||||
pCfg->dnodeId = htonl(pCfg->dnodeId);
|
||||
pCfg->clusterId = htonl(pCfg->clusterId);
|
||||
|
||||
for (int32_t i = 0; i < pMnodes->nodeNum; ++i) {
|
||||
SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i];
|
||||
|
@ -697,6 +698,7 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
//strcpy(pStatus->dnodeName, tsDnodeName);
|
||||
pStatus->version = htonl(tsVersion);
|
||||
pStatus->dnodeId = htonl(tsDnodeCfg.dnodeId);
|
||||
pStatus->clusterId = htonl(tsDnodeCfg.clusterId);
|
||||
strcpy(pStatus->dnodeEp, tsLocalEp);
|
||||
pStatus->lastReboot = htonl(tsRebootTime);
|
||||
pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
|
||||
|
@ -767,6 +769,13 @@ static bool dnodeReadDnodeCfg() {
|
|||
}
|
||||
tsDnodeCfg.dnodeId = dnodeId->valueint;
|
||||
|
||||
cJSON* clusterId = cJSON_GetObjectItem(root, "clusterId");
|
||||
if (!clusterId || clusterId->type != cJSON_Number) {
|
||||
dError("failed to read dnodeCfg.json, clusterId not found");
|
||||
goto PARSE_CFG_OVER;
|
||||
}
|
||||
tsDnodeCfg.clusterId = clusterId->valueint;
|
||||
|
||||
ret = true;
|
||||
|
||||
dInfo("read numOfVnodes successed, dnodeId:%d", tsDnodeCfg.dnodeId);
|
||||
|
@ -790,7 +799,8 @@ static void dnodeSaveDnodeCfg() {
|
|||
char * content = calloc(1, maxLen + 1);
|
||||
|
||||
len += snprintf(content + len, maxLen - len, "{\n");
|
||||
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d\n", tsDnodeCfg.dnodeId);
|
||||
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsDnodeCfg.dnodeId);
|
||||
len += snprintf(content + len, maxLen - len, " \"clusterId\": %d\n", tsDnodeCfg.clusterId);
|
||||
len += snprintf(content + len, maxLen - len, "}\n");
|
||||
|
||||
fwrite(content, 1, len, fp);
|
||||
|
@ -803,8 +813,9 @@ static void dnodeSaveDnodeCfg() {
|
|||
|
||||
void dnodeUpdateDnodeCfg(SDMDnodeCfg *pCfg) {
|
||||
if (tsDnodeCfg.dnodeId == 0) {
|
||||
dInfo("dnodeId is set to %d", pCfg->dnodeId);
|
||||
dInfo("dnodeId is set to %d, clusterId is set to %d", pCfg->dnodeId, pCfg->clusterId);
|
||||
tsDnodeCfg.dnodeId = pCfg->dnodeId;
|
||||
tsDnodeCfg.clusterId = pCfg->clusterId;
|
||||
dnodeSaveDnodeCfg();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -377,6 +377,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
#define TSDB_ORDER_ASC 1
|
||||
#define TSDB_ORDER_DESC 2
|
||||
|
||||
#define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1
|
||||
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
|
||||
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
|
||||
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
|
||||
|
|
|
@ -120,12 +120,18 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, 0, 0x0323, "sdb object
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, 0, 0x0324, "sdb invalid meta row")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, 0, 0x0325, "sdb invalid key type")
|
||||
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "mnode dnode already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "mnode dnode not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "mnode vgroup not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "mnode cant not remove master")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "mnode no enough dnodes")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode cluster cfg inconsistent")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "dnode already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "dnode not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "vgroup not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "cant not remove master")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "no enough dnodes")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "cluster cfg inconsistent")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, 0, 0x0336, "invalid dnode cfg option")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, 0, 0x0337, "balance already enabled")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, 0, 0x0338, "vgroup not in dnode")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, 0, 0x0339, "vgroup already in dnode")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE, 0, 0x033A, "dnode not avaliable")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, 0, 0x033B, "cluster id not match")
|
||||
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account")
|
||||
|
@ -227,6 +233,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, 0, 0x080B, "grant cpu
|
|||
|
||||
// sync
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CONFIG, 0, 0x0900, "sync invalid configuration")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "sync module not enabled")
|
||||
|
||||
// wal
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "wal app error")
|
||||
|
|
|
@ -139,6 +139,7 @@ enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_GRANTS,
|
||||
TSDB_MGMT_TABLE_VNODES,
|
||||
TSDB_MGMT_TABLE_STREAMTABLES,
|
||||
TSDB_MGMT_TABLE_CLUSTER,
|
||||
TSDB_MGMT_TABLE_MAX,
|
||||
};
|
||||
|
||||
|
@ -149,6 +150,7 @@ enum _mgmt_table {
|
|||
|
||||
#define TSDB_ALTER_TABLE_ADD_COLUMN 5
|
||||
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
|
||||
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
|
||||
|
||||
#define TSDB_FILL_NONE 0
|
||||
#define TSDB_FILL_NULL 1
|
||||
|
@ -545,6 +547,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int32_t dnodeId;
|
||||
int32_t clusterId;
|
||||
uint32_t moduleStatus;
|
||||
uint32_t numOfVnodes;
|
||||
uint32_t reserved;
|
||||
|
@ -585,6 +588,7 @@ typedef struct {
|
|||
uint16_t openVnodes;
|
||||
uint16_t numOfCores;
|
||||
float diskAvailable; // GB
|
||||
int32_t clusterId;
|
||||
uint8_t alternativeRole;
|
||||
uint8_t reserve2[15];
|
||||
SClusterCfg clusterCfg;
|
||||
|
|
|
@ -29,6 +29,7 @@ void balanceAsyncNotify();
|
|||
void balanceSyncNotify();
|
||||
void balanceReset();
|
||||
int32_t balanceAllocVnodes(struct SVgObj *pVgroup);
|
||||
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
|
||||
int32_t balanceDropDnode(struct SDnodeObj *pDnode);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -53,14 +53,12 @@ typedef struct {
|
|||
int32_t tsdbId;
|
||||
int32_t cacheBlockSize;
|
||||
int32_t totalBlocks;
|
||||
int32_t maxTables; // maximum number of tables this repository can have
|
||||
int32_t daysPerFile; // day per file sharding policy
|
||||
int32_t keep; // day of data to keep
|
||||
int32_t keep1;
|
||||
int32_t keep2;
|
||||
int32_t minRowsPerFileBlock; // minimum rows per file block
|
||||
int32_t maxRowsPerFileBlock; // maximum rows per file block
|
||||
int32_t commitTime;
|
||||
int8_t precision;
|
||||
int8_t compression;
|
||||
} STsdbCfg;
|
||||
|
|
|
@ -37,14 +37,12 @@ static int32_t saveVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
|
|||
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnode->cfgVersion);
|
||||
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnode->tsdbCfg.cacheBlockSize);
|
||||
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnode->tsdbCfg.totalBlocks);
|
||||
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnode->tsdbCfg.maxTables);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnode->tsdbCfg.daysPerFile);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnode->tsdbCfg.keep);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnode->tsdbCfg.keep1);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnode->tsdbCfg.keep2);
|
||||
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.minRowsPerFileBlock);
|
||||
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.maxRowsPerFileBlock);
|
||||
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnode->tsdbCfg.commitTime);
|
||||
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnode->tsdbCfg.precision);
|
||||
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnode->tsdbCfg.compression);
|
||||
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnode->walCfg.walLevel);
|
||||
|
@ -136,12 +134,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
|
|||
}
|
||||
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
|
||||
|
||||
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
|
||||
if (!maxTables || maxTables->type != cJSON_Number) {
|
||||
printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
pVnode->tsdbCfg.maxTables = maxTables->valueint;
|
||||
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
|
||||
// if (!maxTables || maxTables->type != cJSON_Number) {
|
||||
// printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
|
||||
// goto PARSE_OVER;
|
||||
// }
|
||||
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
|
||||
|
||||
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
|
||||
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
|
||||
|
@ -185,12 +183,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
|
|||
}
|
||||
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
|
||||
|
||||
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
|
||||
if (!commitTime || commitTime->type != cJSON_Number) {
|
||||
printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
|
||||
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
|
||||
// if (!commitTime || commitTime->type != cJSON_Number) {
|
||||
// printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
|
||||
// goto PARSE_OVER;
|
||||
// }
|
||||
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
|
||||
|
||||
cJSON *precision = cJSON_GetObjectItem(root, "precision");
|
||||
if (!precision || precision->type != cJSON_Number) {
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_MNODE_CLUSTER_H
|
||||
#define TDENGINE_MNODE_CLUSTER_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct SClusterObj;
|
||||
|
||||
int32_t mnodeInitCluster();
|
||||
void mnodeCleanupCluster();
|
||||
int32_t mnodeGetClusterId();
|
||||
void mnodeUpdateClusterId();
|
||||
void * mnodeGetCluster(int32_t clusterId);
|
||||
void * mnodeGetNextCluster(void *pIter, struct SClusterObj **pCluster);
|
||||
void mnodeIncClusterRef(struct SClusterObj *pCluster);
|
||||
void mnodeDecClusterRef(struct SClusterObj *pCluster);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
|
@ -36,6 +36,14 @@ struct define notes:
|
|||
3. The fields behind the updataEnd field can be changed;
|
||||
*/
|
||||
|
||||
typedef struct SClusterObj {
|
||||
int32_t clusterId;
|
||||
int64_t createdTime;
|
||||
int8_t reserved[36];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
} SClusterObj;
|
||||
|
||||
typedef struct SDnodeObj {
|
||||
int32_t dnodeId;
|
||||
int32_t openVnodes;
|
||||
|
@ -50,8 +58,8 @@ typedef struct SDnodeObj {
|
|||
int8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode
|
||||
int8_t status; // set in balance function
|
||||
int8_t isMgmt;
|
||||
int8_t reserve1[14];
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserve1[11];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
uint32_t moduleStatus;
|
||||
uint32_t lastReboot; // time stamp for last reboot
|
||||
|
@ -68,8 +76,8 @@ typedef struct SMnodeObj {
|
|||
int32_t mnodeId;
|
||||
int8_t reserved0[4];
|
||||
int64_t createdTime;
|
||||
int8_t reserved1[7];
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserved1[4];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int8_t role;
|
||||
int8_t reserved2[3];
|
||||
|
@ -90,8 +98,7 @@ typedef struct SSuperTableObj {
|
|||
int32_t tversion;
|
||||
int32_t numOfColumns;
|
||||
int32_t numOfTags;
|
||||
int8_t reserved1[3];
|
||||
int8_t updateEnd[1];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int32_t numOfTables;
|
||||
SSchema * schema;
|
||||
|
@ -111,8 +118,7 @@ typedef struct {
|
|||
int32_t sid;
|
||||
int32_t vgId;
|
||||
int32_t sqlLen;
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserved1[1];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
char* sql; //used by normal table
|
||||
SSchema* schema; //used by normal table
|
||||
|
@ -138,8 +144,8 @@ typedef struct SVgObj {
|
|||
int8_t status;
|
||||
int8_t reserved0[4];
|
||||
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
|
||||
int8_t reserved1[7];
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserved1[4];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int32_t numOfTables;
|
||||
int64_t totalStorage;
|
||||
|
@ -176,8 +182,8 @@ typedef struct SDbObj {
|
|||
int32_t cfgVersion;
|
||||
SDbCfg cfg;
|
||||
int8_t status;
|
||||
int8_t reserved1[14];
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserved1[11];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int32_t numOfVgroups;
|
||||
int32_t numOfTables;
|
||||
|
@ -196,8 +202,8 @@ typedef struct SUserObj {
|
|||
int64_t createdTime;
|
||||
int8_t superAuth;
|
||||
int8_t writeAuth;
|
||||
int8_t reserved[13];
|
||||
int8_t updateEnd[1];
|
||||
int8_t reserved[10];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
struct SAcctObj * pAcct;
|
||||
} SUserObj;
|
||||
|
@ -228,11 +234,11 @@ typedef struct SAcctObj {
|
|||
int64_t createdTime;
|
||||
int32_t acctId;
|
||||
int8_t status;
|
||||
int8_t reserved0[10];
|
||||
int8_t updateEnd[1];
|
||||
SAcctInfo acctInfo;
|
||||
int8_t reserved0[7];
|
||||
int8_t updateEnd[4];
|
||||
int32_t refCount;
|
||||
int8_t reserved1[4];
|
||||
SAcctInfo acctInfo;
|
||||
pthread_mutex_t mutex;
|
||||
} SAcctObj;
|
||||
|
||||
|
|
|
@ -23,15 +23,16 @@ extern "C" {
|
|||
struct SMnodeMsg;
|
||||
|
||||
typedef enum {
|
||||
SDB_TABLE_DNODE = 0,
|
||||
SDB_TABLE_MNODE = 1,
|
||||
SDB_TABLE_ACCOUNT = 2,
|
||||
SDB_TABLE_USER = 3,
|
||||
SDB_TABLE_DB = 4,
|
||||
SDB_TABLE_VGROUP = 5,
|
||||
SDB_TABLE_STABLE = 6,
|
||||
SDB_TABLE_CTABLE = 7,
|
||||
SDB_TABLE_MAX = 8
|
||||
SDB_TABLE_CLUSTER = 0,
|
||||
SDB_TABLE_DNODE = 1,
|
||||
SDB_TABLE_MNODE = 2,
|
||||
SDB_TABLE_ACCOUNT = 3,
|
||||
SDB_TABLE_USER = 4,
|
||||
SDB_TABLE_DB = 5,
|
||||
SDB_TABLE_VGROUP = 6,
|
||||
SDB_TABLE_STABLE = 7,
|
||||
SDB_TABLE_CTABLE = 8,
|
||||
SDB_TABLE_MAX = 9
|
||||
} ESdbTable;
|
||||
|
||||
typedef enum {
|
||||
|
|
|
@ -64,7 +64,7 @@ static int32_t mnodeAcctActionUpdate(SSdbOper *pOper) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeActionActionEncode(SSdbOper *pOper) {
|
||||
static int32_t mnodeAcctActionEncode(SSdbOper *pOper) {
|
||||
SAcctObj *pAcct = pOper->pObj;
|
||||
memcpy(pOper->rowData, pAcct, tsAcctUpdateSize);
|
||||
pOper->rowSize = tsAcctUpdateSize;
|
||||
|
@ -109,7 +109,7 @@ int32_t mnodeInitAccts() {
|
|||
.insertFp = mnodeAcctActionInsert,
|
||||
.deleteFp = mnodeAcctActionDelete,
|
||||
.updateFp = mnodeAcctActionUpdate,
|
||||
.encodeFp = mnodeActionActionEncode,
|
||||
.encodeFp = mnodeAcctActionEncode,
|
||||
.decodeFp = mnodeAcctActionDecode,
|
||||
.destroyFp = mnodeAcctActionDestroy,
|
||||
.restoredFp = mnodeAcctActionRestored
|
||||
|
|
|
@ -28,6 +28,7 @@ void balanceCleanUp() {}
|
|||
void balanceAsyncNotify() {}
|
||||
void balanceSyncNotify() {}
|
||||
void balanceReset() {}
|
||||
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; }
|
||||
|
||||
int32_t balanceAllocVnodes(SVgObj *pVgroup) {
|
||||
void * pIter = NULL;
|
||||
|
|
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "taoserror.h"
|
||||
#include "ttime.h"
|
||||
#include "dnode.h"
|
||||
#include "mnodeDef.h"
|
||||
#include "mnodeInt.h"
|
||||
#include "mnodeCluster.h"
|
||||
#include "mnodeSdb.h"
|
||||
#include "mnodeShow.h"
|
||||
#include "tglobal.h"
|
||||
|
||||
static void * tsClusterSdb = NULL;
|
||||
static int32_t tsClusterUpdateSize;
|
||||
static int32_t tsClusterId;
|
||||
static int32_t mnodeCreateCluster();
|
||||
|
||||
static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
|
||||
static int32_t mnodeClusterActionDestroy(SSdbOper *pOper) {
|
||||
tfree(pOper->pObj);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionInsert(SSdbOper *pOper) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionDelete(SSdbOper *pOper) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionUpdate(SSdbOper *pOper) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionEncode(SSdbOper *pOper) {
|
||||
SClusterObj *pCluster = pOper->pObj;
|
||||
memcpy(pOper->rowData, pCluster, tsClusterUpdateSize);
|
||||
pOper->rowSize = tsClusterUpdateSize;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionDecode(SSdbOper *pOper) {
|
||||
SClusterObj *pCluster = (SClusterObj *) calloc(1, sizeof(SClusterObj));
|
||||
if (pCluster == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
|
||||
memcpy(pCluster, pOper->rowData, tsClusterUpdateSize);
|
||||
pOper->pObj = pCluster;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t mnodeClusterActionRestored() {
|
||||
int32_t numOfRows = sdbGetNumOfRows(tsClusterSdb);
|
||||
if (numOfRows <= 0 && dnodeIsFirstDeploy()) {
|
||||
mInfo("dnode first deploy, create cluster");
|
||||
int32_t code = mnodeCreateCluster();
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("failed to create cluster, reason:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
mnodeUpdateClusterId();
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t mnodeInitCluster() {
|
||||
SClusterObj tObj;
|
||||
tsClusterUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
|
||||
|
||||
SSdbTableDesc tableDesc = {
|
||||
.tableId = SDB_TABLE_CLUSTER,
|
||||
.tableName = "cluster",
|
||||
.hashSessions = TSDB_DEFAULT_CLUSTER_HASH_SIZE,
|
||||
.maxRowSize = tsClusterUpdateSize,
|
||||
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
|
||||
.keyType = SDB_KEY_INT,
|
||||
.insertFp = mnodeClusterActionInsert,
|
||||
.deleteFp = mnodeClusterActionDelete,
|
||||
.updateFp = mnodeClusterActionUpdate,
|
||||
.encodeFp = mnodeClusterActionEncode,
|
||||
.decodeFp = mnodeClusterActionDecode,
|
||||
.destroyFp = mnodeClusterActionDestroy,
|
||||
.restoredFp = mnodeClusterActionRestored
|
||||
};
|
||||
|
||||
tsClusterSdb = sdbOpenTable(&tableDesc);
|
||||
if (tsClusterSdb == NULL) {
|
||||
mError("table:%s, failed to create hash", tableDesc.tableName);
|
||||
return -1;
|
||||
}
|
||||
|
||||
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeGetClusterMeta);
|
||||
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeRetrieveClusters);
|
||||
|
||||
mDebug("table:%s, hash is created", tableDesc.tableName);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void mnodeCleanupCluster() {
|
||||
sdbCloseTable(tsClusterSdb);
|
||||
tsClusterSdb = NULL;
|
||||
}
|
||||
|
||||
void *mnodeGetCluster(int32_t clusterId) {
|
||||
return sdbGetRow(tsClusterSdb, &clusterId);
|
||||
}
|
||||
|
||||
void *mnodeGetNextCluster(void *pIter, SClusterObj **pCluster) {
|
||||
return sdbFetchRow(tsClusterSdb, pIter, (void **)pCluster);
|
||||
}
|
||||
|
||||
void mnodeIncClusterRef(SClusterObj *pCluster) {
|
||||
sdbIncRef(tsClusterSdb, pCluster);
|
||||
}
|
||||
|
||||
void mnodeDecClusterRef(SClusterObj *pCluster) {
|
||||
sdbDecRef(tsClusterSdb, pCluster);
|
||||
}
|
||||
|
||||
static int32_t mnodeCreateCluster() {
|
||||
int32_t numOfClusters = sdbGetNumOfRows(tsClusterSdb);
|
||||
if (numOfClusters != 0) return TSDB_CODE_SUCCESS;
|
||||
|
||||
SClusterObj *pCluster = malloc(sizeof(SClusterObj));
|
||||
memset(pCluster, 0, sizeof(SClusterObj));
|
||||
pCluster->createdTime = taosGetTimestampMs();
|
||||
pCluster->clusterId = labs((pCluster->createdTime >> 32) & (pCluster->createdTime)) | (*(int32_t*)tsFirst);
|
||||
|
||||
SSdbOper oper = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.table = tsClusterSdb,
|
||||
.pObj = pCluster,
|
||||
};
|
||||
|
||||
return sdbInsertRow(&oper);
|
||||
}
|
||||
|
||||
int32_t mnodeGetClusterId() {
|
||||
return tsClusterId;
|
||||
}
|
||||
|
||||
void mnodeUpdateClusterId() {
|
||||
SClusterObj *pCluster = NULL;
|
||||
mnodeGetNextCluster(NULL, &pCluster);
|
||||
if (pCluster != NULL) {
|
||||
tsClusterId = pCluster->clusterId;
|
||||
mnodeDecClusterRef(pCluster);
|
||||
mInfo("cluster id is %d", tsClusterId);
|
||||
} else {
|
||||
//assert(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
|
||||
int32_t cols = 0;
|
||||
SSchema *pSchema = pMeta->schema;
|
||||
|
||||
pShow->bytes[cols] = 4;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_INT;
|
||||
strcpy(pSchema[cols].name, "clusterId");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 8;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
strcpy(pSchema[cols].name, "create_time");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pMeta->numOfColumns = htons(cols);
|
||||
strcpy(pMeta->tableId, "show cluster");
|
||||
pShow->numOfColumns = cols;
|
||||
|
||||
pShow->offset[0] = 0;
|
||||
for (int32_t i = 1; i < cols; ++i) {
|
||||
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
|
||||
}
|
||||
|
||||
pShow->numOfRows = 1;
|
||||
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
int32_t numOfRows = 0;
|
||||
int32_t cols = 0;
|
||||
char * pWrite;
|
||||
SClusterObj *pCluster = NULL;
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = mnodeGetNextCluster(pShow->pIter, &pCluster);
|
||||
if (pCluster == NULL) break;
|
||||
|
||||
cols = 0;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *) pWrite = pCluster->clusterId;
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int32_t *) pWrite = pCluster->createdTime;
|
||||
cols++;
|
||||
|
||||
mnodeDecClusterRef(pCluster);
|
||||
numOfRows++;
|
||||
}
|
||||
|
||||
pShow->numOfReads += numOfRows;
|
||||
return numOfRows;
|
||||
}
|
|
@ -67,8 +67,11 @@ static int32_t mnodeDbActionInsert(SSdbOper *pOper) {
|
|||
SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
|
||||
|
||||
pthread_mutex_init(&pDb->mutex, NULL);
|
||||
pthread_mutex_lock(&pDb->mutex);
|
||||
pDb->vgListSize = VG_LIST_SIZE;
|
||||
pDb->vgList = calloc(pDb->vgListSize, sizeof(SVgObj *));
|
||||
pthread_mutex_unlock(&pDb->mutex);
|
||||
|
||||
pDb->numOfVgroups = 0;
|
||||
pDb->numOfTables = 0;
|
||||
pDb->numOfSuperTables = 0;
|
||||
|
@ -395,8 +398,8 @@ static int32_t mnodeCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate, void *pMs
|
|||
|
||||
code = sdbInsertRow(&oper);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mnodeDestroyDb(pDb);
|
||||
mLInfo("db:%s, failed to create, reason:%s", pDb->name, tstrerror(code));
|
||||
mnodeDestroyDb(pDb);
|
||||
return code;
|
||||
} else {
|
||||
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
|
||||
|
@ -605,7 +608,9 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
|
|||
|
||||
static char *mnodeGetDbStr(char *src) {
|
||||
char *pos = strstr(src, TS_PATH_DELIMITER);
|
||||
return ++pos;
|
||||
if (pos != NULL) ++pos;
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
||||
static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
|
||||
|
@ -623,9 +628,12 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
|
|||
cols = 0;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
|
||||
char* name = mnodeGetDbStr(pDb->name);
|
||||
if (name != NULL) {
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, name, pShow->bytes[cols]);
|
||||
} else {
|
||||
STR_TO_VARSTR(pWrite, "NULL");
|
||||
}
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include "mnodeVgroup.h"
|
||||
#include "mnodeWrite.h"
|
||||
#include "mnodePeer.h"
|
||||
#include "mnodeCluster.h"
|
||||
|
||||
int32_t tsAccessSquence = 0;
|
||||
static void *tsDnodeSdb = NULL;
|
||||
|
@ -279,27 +280,36 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
|
|||
tstrncpy(pCmCfgDnode->ep, tsLocalEp, TSDB_EP_LEN);
|
||||
}
|
||||
|
||||
int32_t dnodeId = 0;
|
||||
char* pos = strchr(pCmCfgDnode->ep, ':');
|
||||
if (NULL == pos) {
|
||||
dnodeId = strtol(pCmCfgDnode->ep, NULL, 10);
|
||||
SDnodeObj *pDnode = mnodeGetDnodeByEp(pCmCfgDnode->ep);
|
||||
if (pDnode == NULL) {
|
||||
int32_t dnodeId = strtol(pCmCfgDnode->ep, NULL, 10);
|
||||
if (dnodeId <= 0 || dnodeId > 65536) {
|
||||
mError("failed to cfg dnode, invalid dnodeId:%s", pCmCfgDnode->ep);
|
||||
mError("failed to cfg dnode, invalid dnodeEp:%s", pCmCfgDnode->ep);
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
}
|
||||
|
||||
SRpcEpSet epSet = mnodeGetEpSetFromIp(pCmCfgDnode->ep);
|
||||
if (dnodeId != 0) {
|
||||
SDnodeObj *pDnode = mnodeGetDnode(dnodeId);
|
||||
pDnode = mnodeGetDnode(dnodeId);
|
||||
if (pDnode == NULL) {
|
||||
mError("failed to cfg dnode, invalid dnodeId:%d", dnodeId);
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
}
|
||||
|
||||
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
|
||||
|
||||
if (strncasecmp(pCmCfgDnode->config, "balance", 7) == 0) {
|
||||
int32_t vnodeId = 0;
|
||||
int32_t dnodeId = 0;
|
||||
bool parseOk = taosCheckBalanceCfgOptions(pCmCfgDnode->config + 8, &vnodeId, &dnodeId);
|
||||
if (!parseOk) {
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION;
|
||||
}
|
||||
|
||||
int32_t code = balanceAlterDnode(pDnode, vnodeId, dnodeId);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return code;
|
||||
} else {
|
||||
SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
|
||||
strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
|
||||
strcpy(pMdCfgDnode->config, pCmCfgDnode->config);
|
||||
|
@ -314,8 +324,9 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
|
||||
dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
|
||||
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
static void mnodeProcessCfgDnodeMsgRsp(SRpcMsg *rpcMsg) {
|
||||
|
@ -345,6 +356,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
pStatus->moduleStatus = htonl(pStatus->moduleStatus);
|
||||
pStatus->lastReboot = htonl(pStatus->lastReboot);
|
||||
pStatus->numOfCores = htons(pStatus->numOfCores);
|
||||
pStatus->clusterId = htonl(pStatus->clusterId);
|
||||
|
||||
uint32_t version = htonl(pStatus->version);
|
||||
if (version != tsVersion) {
|
||||
|
@ -374,10 +386,16 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
pDnode->moduleStatus = pStatus->moduleStatus;
|
||||
|
||||
if (pStatus->dnodeId == 0) {
|
||||
mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp);
|
||||
mDebug("dnode:%d %s, first access, set clusterId %d", pDnode->dnodeId, pDnode->dnodeEp, mnodeGetClusterId());
|
||||
} else {
|
||||
if (pStatus->clusterId != mnodeGetClusterId()) {
|
||||
mError("dnode:%d, input clusterId %d not match with exist %d", pDnode->dnodeId, pStatus->clusterId,
|
||||
mnodeGetClusterId());
|
||||
return TSDB_CODE_MND_INVALID_CLUSTER_ID;
|
||||
} else {
|
||||
mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t openVnodes = htons(pStatus->openVnodes);
|
||||
int32_t contLen = sizeof(SDMStatusRsp) + openVnodes * sizeof(SDMVgroupAccess);
|
||||
|
@ -390,6 +408,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
pRsp->dnodeCfg.dnodeId = htonl(pDnode->dnodeId);
|
||||
pRsp->dnodeCfg.moduleStatus = htonl((int32_t)pDnode->isMgmt);
|
||||
pRsp->dnodeCfg.numOfVnodes = htonl(openVnodes);
|
||||
pRsp->dnodeCfg.clusterId = htonl(mnodeGetClusterId());
|
||||
SDMVgroupAccess *pAccess = (SDMVgroupAccess *)((char *)pRsp + sizeof(SDMStatusRsp));
|
||||
|
||||
for (int32_t j = 0; j < openVnodes; ++j) {
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include "mnodeVgroup.h"
|
||||
#include "mnodeUser.h"
|
||||
#include "mnodeTable.h"
|
||||
#include "mnodeCluster.h"
|
||||
#include "mnodeShow.h"
|
||||
#include "mnodeProfile.h"
|
||||
|
||||
|
@ -46,6 +47,7 @@ static bool tsMgmtIsRunning = false;
|
|||
|
||||
static const SMnodeComponent tsMnodeComponents[] = {
|
||||
{"profile", mnodeInitProfile, mnodeCleanupProfile},
|
||||
{"cluster", mnodeInitCluster, mnodeCleanupCluster},
|
||||
{"accts", mnodeInitAccts, mnodeCleanupAccts},
|
||||
{"users", mnodeInitUsers, mnodeCleanupUsers},
|
||||
{"dnodes", mnodeInitDnodes, mnodeCleanupDnodes},
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "mnodeInt.h"
|
||||
#include "mnodeMnode.h"
|
||||
#include "mnodeDnode.h"
|
||||
#include "mnodeCluster.h"
|
||||
#include "mnodeSdb.h"
|
||||
|
||||
#define SDB_TABLE_LEN 12
|
||||
|
@ -214,6 +215,7 @@ void sdbUpdateMnodeRoles() {
|
|||
}
|
||||
}
|
||||
|
||||
mnodeUpdateClusterId();
|
||||
mnodeUpdateMnodeEpSet();
|
||||
}
|
||||
|
||||
|
@ -406,7 +408,7 @@ void sdbDecRef(void *handle, void *pObj) {
|
|||
int32_t refCount = atomic_sub_fetch_32(pRefCount, 1);
|
||||
sdbTrace("def ref of table:%s record:%p:%s:%d", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
|
||||
|
||||
int8_t *updateEnd = pObj + pTable->refCountPos - 1;
|
||||
int32_t *updateEnd = pObj + pTable->refCountPos - 4;
|
||||
if (refCount <= 0 && *updateEnd) {
|
||||
sdbTrace("table:%s, record:%p:%s:%d is destroyed", pTable->tableName, pObj, sdbGetKeyStrFromObj(pTable, pObj), *pRefCount);
|
||||
SSdbOper oper = {.pObj = pObj};
|
||||
|
@ -453,7 +455,7 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
keySize = strlen((char *)key);
|
||||
}
|
||||
|
||||
taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(void **));
|
||||
taosHashPut(pTable->iHandle, key, keySize, &pOper->pObj, sizeof(int64_t));
|
||||
|
||||
sdbIncRef(pTable, pOper->pObj);
|
||||
atomic_add_fetch_32(&pTable->numOfRows, 1);
|
||||
|
@ -472,6 +474,14 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
}
|
||||
|
||||
static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
|
||||
int32_t *updateEnd = pOper->pObj + pTable->refCountPos - 4;
|
||||
bool set = atomic_val_compare_exchange_32(updateEnd, 0, 1) == 0;
|
||||
if (!set) {
|
||||
sdbError("table:%s, failed to delete record:%s from hash, for it already removed", pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj));
|
||||
return TSDB_CODE_MND_SDB_OBJ_NOT_THERE;
|
||||
}
|
||||
|
||||
(*pTable->deleteFp)(pOper);
|
||||
|
||||
void * key = sdbGetObjKey(pTable, pOper->pObj);
|
||||
|
@ -486,8 +496,6 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
|
|||
sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
|
||||
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
|
||||
|
||||
int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
|
||||
*updateEnd = 1;
|
||||
sdbDecRef(pTable, pOper->pObj);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -654,8 +662,9 @@ bool sdbCheckRowDeleted(void *pTableInput, void *pRow) {
|
|||
SSdbTable *pTable = pTableInput;
|
||||
if (pTable == NULL) return false;
|
||||
|
||||
int8_t *updateEnd = pRow + pTable->refCountPos - 1;
|
||||
return (*updateEnd == 1);
|
||||
int32_t *updateEnd = pRow + pTable->refCountPos - 4;
|
||||
return atomic_val_compare_exchange_32(updateEnd, 1, 1) == 1;
|
||||
// return (*updateEnd == 1);
|
||||
}
|
||||
|
||||
int32_t sdbDeleteRow(SSdbOper *pOper) {
|
||||
|
|
|
@ -103,6 +103,8 @@ static char *mnodeGetShowType(int32_t showType) {
|
|||
case TSDB_MGMT_TABLE_SCORES: return "show scores";
|
||||
case TSDB_MGMT_TABLE_GRANTS: return "show grants";
|
||||
case TSDB_MGMT_TABLE_VNODES: return "show vnodes";
|
||||
case TSDB_MGMT_TABLE_CLUSTER: return "show clusters";
|
||||
case TSDB_MGMT_TABLE_STREAMTABLES : return "show streamtables";
|
||||
default: return "undefined";
|
||||
}
|
||||
}
|
||||
|
@ -236,7 +238,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {
|
|||
}
|
||||
|
||||
SCMHeartBeatMsg *pHBMsg = pMsg->rpcMsg.pCont;
|
||||
SRpcConnInfo connInfo;
|
||||
SRpcConnInfo connInfo = {0};
|
||||
rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo);
|
||||
|
||||
int32_t connId = htonl(pHBMsg->connId);
|
||||
|
@ -284,7 +286,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
|
|||
SCMConnectRsp *pConnectRsp = NULL;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
SRpcConnInfo connInfo;
|
||||
SRpcConnInfo connInfo = {0};
|
||||
if (rpcGetConnInfo(pMsg->rpcMsg.handle, &connInfo) != 0) {
|
||||
mError("thandle:%p is already released while process connect msg", pMsg->rpcMsg.handle);
|
||||
code = TSDB_CODE_MND_INVALID_CONNECTION;
|
||||
|
|
|
@ -72,7 +72,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg);
|
|||
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *mnodeMsg);
|
||||
static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg);
|
||||
static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg);
|
||||
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn);
|
||||
static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg);
|
||||
static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg);
|
||||
|
||||
static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *mnodeMsg);
|
||||
|
@ -759,7 +759,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
|
|||
SChildTableObj *pCTable = (SChildTableObj *)pMsg->pTable;
|
||||
mInfo("app:%p:%p, table:%s, start to drop ctable, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
|
||||
pDrop->tableId, pCTable->vgId, pCTable->sid, pCTable->uid);
|
||||
return mnodeProcessDropChildTableMsg(pMsg, true);
|
||||
return mnodeProcessDropChildTableMsg(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -882,7 +882,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
|
|||
static int32_t mnodeDropSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
|
||||
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("app:%p:%p, table:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
|
||||
mError("app:%p:%p, stable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
|
||||
} else {
|
||||
mLInfo("app:%p:%p, stable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
|
||||
}
|
||||
|
@@ -1223,6 +1223,55 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) {
  return code;
}

static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
  SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
  mLInfo("app:%p:%p, stable %s, change column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
         tstrerror(code));
  return code;
}

static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
  SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
  int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
  if (col < 0) {
    mError("app:%p:%p, stable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
           pStable->info.tableId, oldName, newName);
    return TSDB_CODE_MND_FIELD_NOT_EXIST;
  }

  // int32_t rowSize = 0;
  uint32_t len = strlen(newName);
  if (len >= TSDB_COL_NAME_LEN) {
    return TSDB_CODE_MND_COL_NAME_TOO_LONG;
  }

  if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
    return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
  }

  // update
  SSchema *schema = (SSchema *) (pStable->schema + col);
  tstrncpy(schema->name, newName, sizeof(schema->name));

  mInfo("app:%p:%p, stable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
        oldName, newName);

  SSdbOper oper = {
    .type = SDB_OPER_GLOBAL,
    .table = tsSuperTableSdb,
    .pObj = pStable,
    .pMsg = pMsg,
    .cb = mnodeChangeSuperTableColumnCb
  };

  int32_t code = sdbUpdateRow(&oper);
  if (code == TSDB_CODE_SUCCESS) {
    code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
  }

  return code;
}

// show super tables
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
  SDbObj *pDb = mnodeGetDb(pShow->db);

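The hunk above wires column renaming into the same asynchronous pattern used elsewhere in mnode: validate, patch the in-memory schema, then hand the row to sdbUpdateRow() and report completion from the callback. It backs the new TSDB_ALTER_TABLE_CHANGE_COLUMN branch in mnodeProcessAlterTableMsg further down. A compact stand-alone sketch of just the validation and rename step (plain C, hypothetical fixed-size schema, not the real SSchema/sdb types):

#include <stdio.h>
#include <string.h>

#define COL_NAME_LEN 65   /* hypothetical stand-in for TSDB_COL_NAME_LEN */

typedef struct { char name[COL_NAME_LEN]; } DemoSchema;

static int findCol(DemoSchema *cols, int n, const char *name) {
  for (int i = 0; i < n; i++) if (strcmp(cols[i].name, name) == 0) return i;
  return -1;
}

/* mirrors the checks in mnodeChangeSuperTableColumn: the old name must exist,
 * the new name must fit, and the new name must not collide */
static int renameCol(DemoSchema *cols, int n, const char *oldName, const char *newName) {
  int col = findCol(cols, n, oldName);
  if (col < 0) return -1;                            /* FIELD_NOT_EXIST     */
  if (strlen(newName) >= COL_NAME_LEN) return -2;    /* COL_NAME_TOO_LONG   */
  if (findCol(cols, n, newName) >= 0) return -3;     /* FIELD_ALREADY_EXIST */
  snprintf(cols[col].name, COL_NAME_LEN, "%s", newName);
  return 0;
}

int main(void) {
  DemoSchema cols[2] = {{"ts"}, {"speed"}};
  printf("speed -> velocity: %d\n", renameCol(cols, 2, "speed", "velocity"));
  printf("speed -> ts (old name gone): %d\n", renameCol(cols, 2, "speed", "ts"));
  return 0;
}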
@ -1405,6 +1454,9 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT
|
|||
static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
|
||||
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
|
||||
STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16));
|
||||
if (pMeta == NULL) {
|
||||
return TSDB_CODE_MND_OUT_OF_MEMORY;
|
||||
}
|
||||
pMeta->uid = htobe64(pTable->uid);
|
||||
pMeta->sversion = htons(pTable->sversion);
|
||||
pMeta->tversion = htons(pTable->tversion);
|
||||
|
@@ -1765,18 +1817,13 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
  }
}

static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
static int32_t mnodeSendDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
  if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
  if (pMsg->pVgroup == NULL) {
    mError("app:%p:%p, table:%s, failed to drop ctable, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg,
           pTable->info.tableId);
    return TSDB_CODE_MND_APP_ERROR;
  }
  mLInfo("app:%p:%p, ctable:%s, is dropped from sdb", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);

  SMDDropTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropTableMsg));
  if (pDrop == NULL) {
    mError("app:%p:%p, table:%s, failed to drop ctable, no enough memory", pMsg->rpcMsg.ahandle, pMsg,
    mError("app:%p:%p, ctable:%s, failed to drop ctable, no enough memory", pMsg->rpcMsg.ahandle, pMsg,
           pTable->info.tableId);
    return TSDB_CODE_MND_OUT_OF_MEMORY;
  }

@@ -1789,7 +1836,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {

  SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);

  mInfo("app:%p:%p, table:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
  mInfo("app:%p:%p, ctable:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
        pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid);

  SRpcMsg rpcMsg = {

@@ -1807,6 +1854,40 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
  return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}

static int32_t mnodeDropChildTableCb(SMnodeMsg *pMsg, int32_t code) {
  if (code != TSDB_CODE_SUCCESS) {
    SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
    mError("app:%p:%p, ctable:%s, failed to drop, sdb error", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId);
    return code;
  }

  return mnodeSendDropChildTableMsg(pMsg, true);
}

static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
  if (pMsg->pVgroup == NULL) pMsg->pVgroup = mnodeGetVgroup(pTable->vgId);
  if (pMsg->pVgroup == NULL) {
    mError("app:%p:%p, table:%s, failed to drop ctable, vgroup not exist", pMsg->rpcMsg.ahandle, pMsg,
           pTable->info.tableId);
    return TSDB_CODE_MND_APP_ERROR;
  }

  SSdbOper oper = {
    .type = SDB_OPER_GLOBAL,
    .table = tsChildTableSdb,
    .pObj = pTable,
    .pMsg = pMsg,
    .cb = mnodeDropChildTableCb
  };

  int32_t code = sdbDeleteRow(&oper);
  if (code == TSDB_CODE_SUCCESS) {
    return TSDB_CODE_MND_ACTION_IN_PROGRESS;
  }
  return code;
}

static int32_t mnodeFindNormalTableColumnIndex(SChildTableObj *pTable, char *colName) {
  SSchema *schema = (SSchema *) pTable->schema;
  for (int32_t col = 0; col < pTable->numOfColumns; col++) {

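Dropping a child table is now two-phase: mnodeProcessDropChildTableMsg deletes the metadata row via sdbDeleteRow with a callback, and the callback (mnodeDropChildTableCb) sends the physical drop message to the vnode only if the sdb step succeeded. A minimal sketch of that callback-chained shape, using mocked types rather than the real sdb/mnode API:

#include <stdio.h>

typedef struct Msg { const char *tableId; } Msg;
typedef int (*DoneCb)(Msg *msg, int code);

static int sendDropToVnode(Msg *msg, int needRsp) {
  printf("drop %s on vnode (needRsp=%d)\n", msg->tableId, needRsp);
  return 0;
}

static int dropCb(Msg *msg, int code) {
  if (code != 0) return code;          /* sdb failed, stop here */
  return sendDropToVnode(msg, 1);      /* metadata gone, now drop the data */
}

static int sdbDeleteRowMock(Msg *msg, DoneCb cb) {
  /* the real sdb applies the delete asynchronously, then invokes cb */
  return cb(msg, 0);
}

int main(void) {
  Msg msg = {"d1.t1"};
  return sdbDeleteRowMock(&msg, dropCb);
}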
@ -1948,6 +2029,48 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
|
||||
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
|
||||
int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
|
||||
if (col < 0) {
|
||||
mError("app:%p:%p, ctable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
|
||||
pTable->info.tableId, oldName, newName);
|
||||
return TSDB_CODE_MND_FIELD_NOT_EXIST;
|
||||
}
|
||||
|
||||
// int32_t rowSize = 0;
|
||||
uint32_t len = strlen(newName);
|
||||
if (len >= TSDB_COL_NAME_LEN) {
|
||||
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
|
||||
}
|
||||
|
||||
if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
|
||||
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
|
||||
}
|
||||
|
||||
// update
|
||||
SSchema *schema = (SSchema *) (pTable->schema + col);
|
||||
tstrncpy(schema->name, newName, sizeof(schema->name));
|
||||
|
||||
mInfo("app:%p:%p, ctable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
|
||||
oldName, newName);
|
||||
|
||||
SSdbOper oper = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.table = tsChildTableSdb,
|
||||
.pObj = pTable,
|
||||
.pMsg = pMsg,
|
||||
.cb = mnodeAlterNormalTableColumnCb
|
||||
};
|
||||
|
||||
int32_t code = sdbUpdateRow(&oper);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *pTable) {
|
||||
int32_t numOfCols = pTable->numOfColumns;
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
|
@ -2220,19 +2343,6 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
|
|||
return;
|
||||
}
|
||||
|
||||
SSdbOper oper = {
|
||||
.type = SDB_OPER_GLOBAL,
|
||||
.table = tsChildTableSdb,
|
||||
.pObj = pTable
|
||||
};
|
||||
|
||||
int32_t code = sdbDeleteRow(&oper);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("app:%p:%p, table:%s, update ctables sdb error", mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId);
|
||||
dnodeSendRpcMnodeWriteRsp(mnodeMsg, TSDB_CODE_MND_SDB_ERROR);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mnodeMsg->pVgroup->numOfTables <= 0) {
|
||||
mInfo("app:%p:%p, vgId:%d, all tables is dropped, drop vgroup", mnodeMsg->rpcMsg.ahandle, mnodeMsg,
|
||||
mnodeMsg->pVgroup->vgId);
|
||||
|
@ -2259,7 +2369,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
|
|||
if (sdbCheckRowDeleted(tsChildTableSdb, pTable)) {
|
||||
mDebug("app:%p:%p, table:%s, create table rsp received, but a deleting opertion incoming, vgId:%d sid:%d uid:%" PRIu64,
|
||||
mnodeMsg->rpcMsg.ahandle, mnodeMsg, pTable->info.tableId, pTable->vgId, pTable->sid, pTable->uid);
|
||||
mnodeProcessDropChildTableMsg(mnodeMsg, false);
|
||||
mnodeSendDropChildTableMsg(mnodeMsg, false);
|
||||
rpcMsg->code = TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -2580,6 +2690,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
code = mnodeAddSuperTableColumn(pMsg, pAlter->schema, 1);
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
|
||||
code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
|
||||
code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
|
||||
} else {
|
||||
}
|
||||
} else {
|
||||
|
@ -2590,6 +2702,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
|
|||
code = mnodeAddNormalTableColumn(pMsg, pAlter->schema, 1);
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
|
||||
code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
|
||||
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
|
||||
code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
|
||||
} else {
|
||||
}
|
||||
}
@@ -358,7 +358,7 @@ static int32_t mnodeRetrieveUsers(SShowObj *pShow, char *data, int32_t rows, voi
}

SUserObj *mnodeGetUserFromConn(void *pConn) {
  SRpcConnInfo connInfo;
  SRpcConnInfo connInfo = {0};
  if (rpcGetConnInfo(pConn, &connInfo) == 0) {
    return mnodeGetUser(connInfo.user);
  } else {

@@ -434,15 +434,22 @@ int32_t mnodeGetAvailableVgroup(SMnodeMsg *pMsg, SVgObj **ppVgroup, int32_t *pSi
  }

  if (pDb->numOfVgroups < maxVgroupsPerDb) {
    mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle, pMsg,
           pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
    mDebug("app:%p:%p, db:%s, try to create a new vgroup, numOfVgroups:%d maxVgroupsPerDb:%d", pMsg->rpcMsg.ahandle,
           pMsg, pDb->name, pDb->numOfVgroups, maxVgroupsPerDb);
    pthread_mutex_unlock(&pDb->mutex);
    int32_t code = mnodeCreateVgroup(pMsg);
    if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return code;
    if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) {
      return code;
    } else {
      pthread_mutex_lock(&pDb->mutex);
    }
  }

  SVgObj *pVgroup = pDb->vgList[0];
  if (pVgroup == NULL) return TSDB_CODE_MND_NO_ENOUGH_DNODES;
  if (pVgroup == NULL) {
    pthread_mutex_unlock(&pDb->mutex);
    return TSDB_CODE_MND_NO_ENOUGH_DNODES;
  }

  int32_t code = mnodeAllocVgroupIdPool(pVgroup);
  if (code != TSDB_CODE_SUCCESS) {

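This hunk is about lock balance: the db mutex is released around mnodeCreateVgroup, re-acquired when that call completes synchronously, and released before the early NO_ENOUGH_DNODES return. A sketch of that discipline in isolation (pthread only, the surrounding names are simplified stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dbMutex = PTHREAD_MUTEX_INITIALIZER;

static int createVgroupMock(void) { return 0; /* 0 = finished synchronously */ }

static int getAvailableVgroup(int needCreate, int haveVgroup) {
  pthread_mutex_lock(&dbMutex);
  if (needCreate) {
    pthread_mutex_unlock(&dbMutex);        /* don't hold the lock across the slow call */
    int code = createVgroupMock();
    if (code != 0) return code;            /* async path: lock already released */
    pthread_mutex_lock(&dbMutex);          /* sync path: re-acquire before reuse */
  }
  if (!haveVgroup) {
    pthread_mutex_unlock(&dbMutex);        /* the fix: unlock before the early return */
    return -1;
  }
  pthread_mutex_unlock(&dbMutex);
  return 0;
}

int main(void) { printf("%d\n", getAvailableVgroup(1, 1)); return 0; }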
@ -483,7 +490,7 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
|
|||
} else {
|
||||
pVgroup->status = TAOS_VG_STATUS_READY;
|
||||
SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
|
||||
sdbUpdateRow(&desc);
|
||||
(void)sdbUpdateRow(&desc);
|
||||
}
|
||||
|
||||
mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
|
||||
|
@ -585,7 +592,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 12 + VARSTR_HEADER_SIZE;
|
||||
pShow->bytes[cols] = 8 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "status");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
|
@ -612,12 +619,6 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
|
|||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 40 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "end_point");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "vstatus");
|
||||
|
@ -709,27 +710,15 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
|
|||
*(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
|
||||
cols++;
|
||||
|
||||
SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
|
||||
|
||||
SDnodeObj * pDnode = pVgroup->vnodeGid[i].pDnode;
|
||||
const char *role = "NULL";
|
||||
if (pDnode != NULL) {
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDnode->dnodeEp, pShow->bytes[cols]);
|
||||
cols++;
|
||||
role = mnodeGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
|
||||
}
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
char *role = mnodeGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, role, pShow->bytes[cols]);
|
||||
cols++;
|
||||
} else {
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
const char *src = "NULL";
|
||||
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
|
||||
cols++;
|
||||
}
|
||||
}
|
||||
|
||||
mnodeDecVgroupRef(pVgroup);
@@ -121,6 +121,10 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,

  for (int k = 0; k < numOfRows; ++k) {
    TAOS_ROW row = taos_fetch_row(result);
    if (row == NULL) {
      cmd->numOfRows--;
      continue;
    }
    int32_t* length = taos_fetch_lengths(result);

    // for group by

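Both JSON builders in the http module (this hunk and the matching one in restBuildSqlJson further down) now tolerate a NULL row from taos_fetch_row in the middle of a batch instead of dereferencing it. A small sketch of that defensive loop; taos_fetch_row and taos_fetch_lengths are the real client calls, but the surrounding handling and the availability of taos.h are assumed:

#include <stdio.h>
#include <taos.h>   /* TDengine C client header, assumed available */

/* Defensive row loop: the reported row count can be larger than what is
 * actually fetchable, so a NULL row is skipped and the count adjusted. */
static int dumpRows(TAOS_RES *result, int numOfRows) {
  int kept = numOfRows;
  for (int k = 0; k < numOfRows; ++k) {
    TAOS_ROW row = taos_fetch_row(result);
    if (row == NULL) {      /* same guard as the patch */
      kept--;
      continue;
    }
    int32_t *length = taos_fetch_lengths(result);
    printf("row %d, first field length %d\n", k, length ? length[0] : -1);
  }
  return kept;
}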
@ -108,7 +108,7 @@ HttpContext *httpCreateContext(int32_t fd) {
|
|||
pContext->lastAccessTime = taosGetTimestampSec();
|
||||
pContext->state = HTTP_CONTEXT_STATE_READY;
|
||||
|
||||
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(void *), &pContext, sizeof(void *), 3);
|
||||
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(int64_t), &pContext, sizeof(int64_t), 3);
|
||||
pContext->ppContext = ppContext;
|
||||
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
|
||||
|
||||
|
|
|
@ -94,6 +94,10 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
|
|||
|
||||
for (int k = 0; k < numOfRows; ++k) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
if (row == NULL) {
|
||||
cmd->numOfRows--;
|
||||
continue;
|
||||
}
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
|
||||
// data row array begin
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include "dnode.h"
|
||||
#include "monitor.h"
|
||||
|
||||
|
||||
#define monitorFatal(...) { if (monitorDebugFlag & DEBUG_FATAL) { taosPrintLog("MON FATAL ", 255, __VA_ARGS__); }}
|
||||
#define monitorError(...) { if (monitorDebugFlag & DEBUG_ERROR) { taosPrintLog("MON ERROR ", 255, __VA_ARGS__); }}
|
||||
#define monitorWarn(...) { if (monitorDebugFlag & DEBUG_WARN) { taosPrintLog("MON WARN ", 255, __VA_ARGS__); }}
|
||||
|
@ -78,6 +77,7 @@ static void monitorStartTimer();
|
|||
static void monitorSaveSystemInfo();
|
||||
extern int32_t (*monitorStartSystemFp)();
|
||||
extern void (*monitorStopSystemFp)();
|
||||
extern void (*monitorExecuteSQLFp)(char *sql);
|
||||
|
||||
static void monitorCheckDiskUsage(void *para, void *unused) {
|
||||
taosGetDisk();
|
||||
|
@ -207,6 +207,7 @@ static void monitorInitDatabase() {
|
|||
taos_query_a(tsMonitorConn.conn, tsMonitorConn.sql, monitorInitDatabaseCb, NULL);
|
||||
} else {
|
||||
tsMonitorConn.state = MONITOR_STATE_INITIALIZED;
|
||||
monitorExecuteSQLFp = monitorExecuteSQL;
|
||||
monitorInfo("monitor service init success");
|
||||
|
||||
monitorStartTimer();
|
||||
|
@ -230,6 +231,7 @@ static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code) {
|
|||
|
||||
void monitorStopSystem() {
|
||||
monitorInfo("monitor module is stopped");
|
||||
monitorExecuteSQLFp = NULL;
|
||||
tsMonitorConn.state = MONITOR_STATE_STOPPED;
|
||||
if (tsMonitorConn.initTimer != NULL) {
|
||||
taosTmrStopA(&(tsMonitorConn.initTimer));
|
||||
|
@@ -248,33 +250,13 @@ static void monitorStartTimer() {
  taosTmrReset(monitorSaveSystemInfo, tsMonitorInterval * 1000, NULL, tscTmr, &tsMonitorConn.timer);
}

static void dnodeMontiorInsertAcctCallback(void *param, TAOS_RES *result, int32_t code) {
static void dnodeMontiorLogCallback(void *param, TAOS_RES *result, int32_t code) {
  if (code < 0) {
    monitorError("monitor:%p, save account info failed, code:%s", tsMonitorConn.conn, tstrerror(code));
    monitorError("monitor:%p, save %s failed, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code));
  } else if (code == 0) {
    monitorError("monitor:%p, save account info failed, affect rows:%d", tsMonitorConn.conn, code);
    monitorError("monitor:%p, save %s failed, affect rows:%d", tsMonitorConn.conn, (char *)param, code);
  } else {
    monitorDebug("monitor:%p, save account info success, code:%s", tsMonitorConn.conn, tstrerror(code));
  }
}

static void dnodeMontiorInsertSysCallback(void *param, TAOS_RES *result, int32_t code) {
  if (code < 0) {
    monitorError("monitor:%p, save system info failed, code:%s %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
  } else if (code == 0) {
    monitorError("monitor:%p, save system info failed, affect rows:%d %s", tsMonitorConn.conn, code, tsMonitorConn.sql);
  } else {
    monitorDebug("monitor:%p, save system info success, code:%s %s", tsMonitorConn.conn, tstrerror(code), tsMonitorConn.sql);
  }
}

static void dnodeMontiorInsertLogCallback(void *param, TAOS_RES *result, int32_t code) {
  if (code < 0) {
    monitorError("monitor:%p, save log failed, code:%s", tsMonitorConn.conn, tstrerror(code));
  } else if (code == 0) {
    monitorError("monitor:%p, save log failed, affect rows:%d", tsMonitorConn.conn, code);
  } else {
    monitorDebug("monitor:%p, save log info success, code:%s", tsMonitorConn.conn, tstrerror(code));
    monitorDebug("monitor:%p, save %s info success, reason:%s", tsMonitorConn.conn, (char *)param, tstrerror(code));
  }
}

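Three near-identical async-insert callbacks are folded into one dnodeMontiorLogCallback; the later hunks show the call sites passing a short label ("sys", "account", "log", "sql") as the param of taos_query_a, and the callback folds that label into its messages. A self-contained sketch of the pattern with a mocked async layer (the real code hands the same (sql, callback, label) triple to taos_query_a):

#include <stdio.h>

typedef void (*QueryCb)(void *param, void *result, int code);

/* one parameterized callback instead of three copies */
static void logCallback(void *param, void *result, int code) {
  (void)result;
  if (code < 0)       printf("save %s failed, reason:%d\n", (char *)param, code);
  else if (code == 0) printf("save %s failed, affect rows:0\n", (char *)param);
  else                printf("save %s info success\n", (char *)param);
}

static void queryAsyncMock(const char *sql, QueryCb cb, void *param) {
  (void)sql;
  cb(param, NULL, 1);   /* pretend one row was written */
}

int main(void) {
  queryAsyncMock("insert into log.dn values(...)", logCallback, "log");
  queryAsyncMock("insert into log.acct values(...)", logCallback, "account");
  return 0;
}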
@ -359,7 +341,7 @@ static void monitorSaveSystemInfo() {
|
|||
pos += monitorBuildReqSql(sql + pos);
|
||||
|
||||
monitorDebug("monitor:%p, save system info, sql:%s", tsMonitorConn.conn, sql);
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertSysCallback, "log");
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sys");
|
||||
|
||||
if (tsMonitorConn.timer != NULL && tsMonitorConn.state != MONITOR_STATE_STOPPED) {
|
||||
monitorStartTimer();
|
||||
|
@ -397,7 +379,7 @@ void monitorSaveAcctLog(SAcctMonitorObj *pMon) {
|
|||
pMon->accessState);
|
||||
|
||||
monitorDebug("monitor:%p, save account info, sql %s", tsMonitorConn.conn, sql);
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertAcctCallback, "account");
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "account");
|
||||
}
|
||||
|
||||
void monitorSaveLog(int32_t level, const char *const format, ...) {
|
||||
|
@ -421,14 +403,11 @@ void monitorSaveLog(int32_t level, const char *const format, ...) {
|
|||
sql[len++] = 0;
|
||||
|
||||
monitorDebug("monitor:%p, save log, sql: %s", tsMonitorConn.conn, sql);
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorInsertLogCallback, "log");
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "log");
|
||||
}
|
||||
|
||||
void monitorExecuteSQL(char *sql) {
|
||||
if (tsMonitorConn.state != MONITOR_STATE_INITIALIZED) return;
|
||||
|
||||
monitorDebug("monitor:%p, execute sql: %s", tsMonitorConn.conn, sql);
|
||||
|
||||
// bug while insert binary
|
||||
// taos_query_a(tsMonitorConn.conn, sql, NULL, NULL);
|
||||
taos_query_a(tsMonitorConn.conn, sql, dnodeMontiorLogCallback, "sql");
|
||||
}
|
||||
|
|
|
@ -64,7 +64,7 @@ int32_t mqttInitSystem() {
|
|||
}
|
||||
|
||||
char* _begin_hostname = strstr(url, recntStatus.hostname);
|
||||
if (strstr(_begin_hostname, ":") != NULL) {
|
||||
if (_begin_hostname != NULL && strstr(_begin_hostname, ":") != NULL) {
|
||||
recntStatus.port = strbetween(_begin_hostname, ":", "/");
|
||||
} else {
|
||||
recntStatus.port = strbetween("'1883'", "'", "'");
|
||||
|
|
|
@@ -42,7 +42,7 @@ typedef struct SSqlGroupbyExpr {
} SSqlGroupbyExpr;

typedef struct SPosInfo {
  int16_t pageId;
  int32_t pageId;
  int16_t rowId;
} SPosInfo;

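Widening pageId from int16_t to int32_t presumably lets a result-buffer position address more than 32767 pages before the index wraps. A tiny illustration of the wrap the narrower type invites (the numbers are illustrative, not measured from the code):

#include <stdio.h>
#include <stdint.h>

int main(void) {
  int32_t pages = 40000;               /* plausible large spill to disk */
  int16_t narrowId = (int16_t)pages;   /* wraps: implementation-defined, typically negative */
  int32_t wideId = pages;              /* after the patch: exact */
  printf("narrow=%d wide=%d\n", narrowId, wideId);
  return 0;
}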
@ -5913,8 +5913,10 @@ _cleanup_qinfo:
|
|||
tsdbDestroyTableGroup(pTableGroupInfo);
|
||||
|
||||
_cleanup_query:
|
||||
if (pGroupbyExpr != NULL) {
|
||||
taosArrayDestroy(pGroupbyExpr->columnInfo);
|
||||
tfree(pGroupbyExpr);
|
||||
free(pGroupbyExpr);
|
||||
}
|
||||
tfree(pTagCols);
|
||||
for (int32_t i = 0; i < numOfOutput; ++i) {
|
||||
SExprInfo* pExprInfo = &pExprs[i];
|
||||
|
@ -6419,8 +6421,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
size += sizeof(STableIdInfo) * taosArrayGetSize(pQInfo->arrTableIdInfo);
|
||||
*contLen = size + sizeof(SRetrieveTableRsp);
|
||||
|
||||
// todo handle failed to allocate memory
|
||||
// todo proper handle failed to allocate memory,
|
||||
// current solution only avoid crash, but cannot return error code to client
|
||||
*pRsp = (SRetrieveTableRsp *)rpcMallocCont(*contLen);
|
||||
if (*pRsp == NULL) {
|
||||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
(*pRsp)->numOfRows = htonl(pQuery->rec.rows);
|
||||
|
||||
int32_t code = pQInfo->code;
|
||||
|
|
|
@ -52,7 +52,7 @@ int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->tota
|
|||
#define FILE_SIZE_ON_DISK(_r) (NUM_OF_PAGES_ON_DISK(_r) * (_r)->pageSize)
|
||||
|
||||
static int32_t createDiskResidesBuf(SDiskbasedResultBuf* pResultBuf) {
|
||||
pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR, 0666);
|
||||
pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR | O_TRUNC, 0666);
|
||||
if (!FD_VALID(pResultBuf->fd)) {
|
||||
qError("failed to create tmp file: %s on disk. %s", pResultBuf->path, strerror(errno));
|
||||
return TAOS_SYSTEM_ERROR(errno);
|
||||
|
|
|
@ -41,6 +41,9 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
|
|||
pWindowResInfo->type = type;
|
||||
_hash_fn_t fn = taosGetDefaultHashFunction(type);
|
||||
pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
|
||||
if (pWindowResInfo->hashList == NULL) {
|
||||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pWindowResInfo->curIndex = -1;
|
||||
pWindowResInfo->size = 0;
|
||||
|
|
|
@ -446,6 +446,9 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
|
|||
// set the idle timer to monitor the activity
|
||||
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
|
||||
rpcSendMsgToPeer(pConn, msg, msgLen);
|
||||
|
||||
// if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured
|
||||
if (pConn->secured == 0 && pMsg->code != TSDB_CODE_RPC_NOT_READY)
|
||||
pConn->secured = 1; // connection shall be secured
|
||||
|
||||
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
|
||||
|
@ -657,7 +660,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc) {
|
|||
pConn->spi = pRpc->spi;
|
||||
pConn->encrypt = pRpc->encrypt;
|
||||
if (pConn->spi) memcpy(pConn->secret, pRpc->secret, TSDB_KEY_LEN);
|
||||
tDebug("%s %p client connection is allocated", pRpc->label, pConn);
|
||||
tDebug("%s %p client connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
|
||||
}
|
||||
|
||||
return pConn;
|
||||
|
@ -718,7 +721,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
|
|||
}
|
||||
|
||||
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
|
||||
tDebug("%s %p server connection is allocated", pRpc->label, pConn);
|
||||
tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
|
||||
}
|
||||
|
||||
return pConn;
|
||||
|
@@ -845,6 +848,16 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
    return TSDB_CODE_RPC_ALREADY_PROCESSED;
  }

  if (pHead->code == TSDB_CODE_RPC_MISMATCHED_LINK_ID) {
    tDebug("%s, mismatched linkUid, link shall be restarted", pConn->info);
    pConn->secured = 0;
    ((SRpcHead *)pConn->pReqMsg)->destId = 0;
    rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
    if (pConn->connType != RPC_CONN_TCPC)
      pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
    return TSDB_CODE_RPC_ALREADY_PROCESSED;
  }

  if (pHead->code == TSDB_CODE_RPC_ACTION_IN_PROGRESS) {
    if (pConn->tretry <= tsRpcMaxRetry) {
      tDebug("%s, peer is still processing the transaction, retry:%d", pConn->info, pConn->tretry);

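The new MISMATCHED_LINK_ID branch above appears to recover from a peer that no longer recognizes the link (for example after a restart): the client clears its cached link state and replays the pending request instead of surfacing an error. A mocked sketch of that recovery shape (none of the types below are the real SRpcConn):

#include <stdio.h>

typedef struct { int secured; int destId; const char *pending; } DemoConn;

static void sendToPeer(DemoConn *c) { printf("resend '%s' destId=%d\n", c->pending, c->destId); }

static int onRspCode(DemoConn *c, int code) {
  enum { MISMATCHED_LINK_ID = 1, ALREADY_PROCESSED = 2 };
  if (code == MISMATCHED_LINK_ID) {
    c->secured = 0;       /* force the handshake to run again */
    c->destId = 0;        /* ask the peer to allocate a fresh link */
    sendToPeer(c);        /* replay the original request */
    return ALREADY_PROCESSED;
  }
  return 0;
}

int main(void) {
  DemoConn conn = {1, 77, "show dnodes"};
  return onRspCode(&conn, 1) == 2 ? 0 : 1;
}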
@ -42,6 +42,7 @@ extern int tsdbDebugFlag;
|
|||
#define TSDB_MAX_TABLE_SCHEMAS 16
|
||||
#define TSDB_FILE_HEAD_SIZE 512
|
||||
#define TSDB_FILE_DELIMITER 0xF00AFA0F
|
||||
#define TSDB_FILE_INIT_MAGIC 0xFFFFFFFF
|
||||
|
||||
// Definitions
|
||||
// ------------------ tsdbMeta.c
|
||||
|
@ -69,6 +70,7 @@ typedef struct {
|
|||
pthread_rwlock_t rwLock;
|
||||
|
||||
int32_t nTables;
|
||||
int32_t maxTables;
|
||||
STable** tables;
|
||||
SList* superList;
|
||||
SHashObj* uidMap;
|
||||
|
@ -110,9 +112,11 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
T_REF_DECLARE();
|
||||
SRWLatch latch;
|
||||
TSKEY keyFirst;
|
||||
TSKEY keyLast;
|
||||
int64_t numOfRows;
|
||||
int32_t maxTables;
|
||||
STableData** tData;
|
||||
SList* actList;
|
||||
SList* bufBlockList;
|
||||
|
@ -132,21 +136,30 @@ typedef struct {
|
|||
// ------------------ tsdbFile.c
|
||||
extern const char* tsdbFileSuffix[];
|
||||
typedef enum {
|
||||
#ifdef TSDB_IDX
|
||||
TSDB_FILE_TYPE_IDX = 0,
|
||||
TSDB_FILE_TYPE_HEAD,
|
||||
#else
|
||||
TSDB_FILE_TYPE_HEAD = 0,
|
||||
#endif
|
||||
TSDB_FILE_TYPE_DATA,
|
||||
TSDB_FILE_TYPE_LAST,
|
||||
TSDB_FILE_TYPE_MAX,
|
||||
#ifdef TSDB_IDX
|
||||
TSDB_FILE_TYPE_NIDX,
|
||||
#endif
|
||||
TSDB_FILE_TYPE_NHEAD,
|
||||
TSDB_FILE_TYPE_NLAST
|
||||
} TSDB_FILE_TYPE;
|
||||
|
||||
typedef struct {
|
||||
uint32_t offset;
|
||||
uint32_t magic;
|
||||
uint32_t len;
|
||||
uint64_t size; // total size of the file
|
||||
uint64_t tombSize; // unused file size
|
||||
uint32_t totalBlocks;
|
||||
uint32_t totalSubBlocks;
|
||||
uint32_t offset;
|
||||
uint64_t size; // total size of the file
|
||||
uint64_t tombSize; // unused file size
|
||||
} STsdbFileInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -197,6 +210,7 @@ typedef struct {
|
|||
|
||||
// ------------------ tsdbRWHelper.c
|
||||
typedef struct {
|
||||
int32_t tid;
|
||||
uint32_t len;
|
||||
uint32_t offset;
|
||||
uint32_t hasLast : 2;
|
||||
|
@ -220,7 +234,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int32_t delimiter; // For recovery usage
|
||||
int32_t checksum; // TODO: decide if checksum logic in this file or make it one API
|
||||
int32_t tid;
|
||||
uint64_t uid;
|
||||
SCompBlock blocks[];
|
||||
} SCompInfo;
|
||||
|
@ -249,14 +263,12 @@ typedef struct {
|
|||
typedef enum { TSDB_WRITE_HELPER, TSDB_READ_HELPER } tsdb_rw_helper_t;
|
||||
|
||||
typedef struct {
|
||||
int fid;
|
||||
TSKEY minKey;
|
||||
TSKEY maxKey;
|
||||
// For read/write purpose
|
||||
SFile headF;
|
||||
SFile dataF;
|
||||
SFile lastF;
|
||||
// For write purpose only
|
||||
SFileGroup fGroup;
|
||||
#ifdef TSDB_IDX
|
||||
SFile nIdxF;
|
||||
#endif
|
||||
SFile nHeadF;
|
||||
SFile nLastF;
|
||||
} SHelperFile;
|
||||
|
@ -264,9 +276,14 @@ typedef struct {
|
|||
typedef struct {
|
||||
uint64_t uid;
|
||||
int32_t tid;
|
||||
int32_t sversion;
|
||||
} SHelperTable;
|
||||
|
||||
typedef struct {
|
||||
SCompIdx* pIdxArray;
|
||||
int numOfIdx;
|
||||
int curIdx;
|
||||
} SIdxH;
|
||||
|
||||
typedef struct {
|
||||
tsdb_rw_helper_t type;
|
||||
|
||||
|
@ -274,7 +291,9 @@ typedef struct {
|
|||
int8_t state;
|
||||
// For file set usage
|
||||
SHelperFile files;
|
||||
SCompIdx* pCompIdx;
|
||||
SIdxH idxH;
|
||||
SCompIdx curCompIdx;
|
||||
void* pWIdx;
|
||||
// For table set usage
|
||||
SHelperTable tableInfo;
|
||||
SCompInfo* pCompInfo;
|
||||
|
@ -286,9 +305,9 @@ typedef struct {
|
|||
void* compBuffer; // Buffer for temperary compress/decompress purpose
|
||||
} SRWHelper;
|
||||
|
||||
|
||||
// Operations
|
||||
// ------------------ tsdbMeta.c
|
||||
#define TSDB_INIT_NTABLES 1024
|
||||
#define TABLE_TYPE(t) (t)->type
|
||||
#define TABLE_NAME(t) (t)->name
|
||||
#define TABLE_CHAR_NAME(t) TABLE_NAME(t)->data
|
||||
|
@ -296,6 +315,7 @@ typedef struct {
|
|||
#define TABLE_TID(t) (t)->tableId.tid
|
||||
#define TABLE_SUID(t) (t)->suid
|
||||
#define TABLE_LASTKEY(t) (t)->lastKey
|
||||
#define TSDB_META_FILE_MAGIC(m) KVSTORE_MAGIC((m)->pStore)
|
||||
|
||||
STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
|
||||
void tsdbFreeMeta(STsdbMeta* pMeta);
|
||||
|
@ -379,6 +399,7 @@ int tsdbInsertRowToMem(STsdbRepo* pRepo, SDataRow row, STable* pTable);
|
|||
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
|
||||
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
|
||||
int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
|
||||
void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem);
|
||||
void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes);
|
||||
int tsdbAsyncCommit(STsdbRepo* pRepo);
|
||||
int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols,
|
||||
|
@ -413,7 +434,7 @@ STsdbFileH* tsdbNewFileH(STsdbCfg* pCfg);
|
|||
void tsdbFreeFileH(STsdbFileH* pFileH);
|
||||
int tsdbOpenFileH(STsdbRepo* pRepo);
|
||||
void tsdbCloseFileH(STsdbRepo* pRepo);
|
||||
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid, int maxTables);
|
||||
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid);
|
||||
void tsdbInitFileGroupIter(STsdbFileH* pFileH, SFileGroupIter* pIter, int direction);
|
||||
void tsdbSeekFileGroupIter(SFileGroupIter* pIter, int fid);
|
||||
SFileGroup* tsdbGetFileGroupNext(SFileGroupIter* pIter);
|
||||
|
@ -445,6 +466,16 @@ void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TS
|
|||
#define helperRepo(h) (h)->pRepo
|
||||
#define helperState(h) (h)->state
|
||||
#define TSDB_NLAST_FILE_OPENED(h) ((h)->files.nLastF.fd > 0)
|
||||
#define helperFileId(h) ((h)->files.fGroup.fileId)
|
||||
#ifdef TSDB_IDX
|
||||
#define helperIdxF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_IDX]))
|
||||
#define helperNewIdxF(h) (&((h)->files.nIdxF))
|
||||
#endif
|
||||
#define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD]))
|
||||
#define helperDataF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA]))
|
||||
#define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST]))
|
||||
#define helperNewHeadF(h) (&((h)->files.nHeadF))
|
||||
#define helperNewLastF(h) (&((h)->files.nLastF))
|
||||
|
||||
int tsdbInitReadHelper(SRWHelper* pHelper, STsdbRepo* pRepo);
|
||||
int tsdbInitWriteHelper(SRWHelper* pHelper, STsdbRepo* pRepo);
|
||||
|
@ -485,6 +516,7 @@ void tsdbGetDataFileName(STsdbRepo* pRepo, int fid, int type, char* fname
|
|||
int tsdbLockRepo(STsdbRepo* pRepo);
|
||||
int tsdbUnlockRepo(STsdbRepo* pRepo);
|
||||
char* tsdbGetDataDirName(char* rootDir);
|
||||
int tsdbGetNextMaxTables(int tid);
|
||||
STsdbMeta* tsdbGetMeta(TSDB_REPO_T* pRepo);
|
||||
STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo);
|
||||
|
||||
|
|
|
@ -30,7 +30,11 @@
|
|||
#include "ttime.h"
|
||||
#include "tfile.h"
|
||||
|
||||
#ifdef TSDB_IDX
|
||||
const char *tsdbFileSuffix[] = {".idx", ".head", ".data", ".last", "", ".i", ".h", ".l"};
|
||||
#else
|
||||
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};
|
||||
#endif
|
||||
|
||||
static int tsdbInitFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type);
|
||||
static void tsdbDestroyFile(SFile *pFile);
|
||||
|
@ -108,7 +112,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
|
||||
memset((void *)(&fileGroup), 0, sizeof(SFileGroup));
|
||||
fileGroup.fileId = fid;
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
if (tsdbInitFile(&fileGroup.files[type], pRepo, fid, type) < 0) {
|
||||
tsdbError("vgId:%d failed to init file fid %d type %d", REPO_ID(pRepo), fid, type);
|
||||
goto _err;
|
||||
|
@ -126,7 +130,7 @@ int tsdbOpenFileH(STsdbRepo *pRepo) {
|
|||
return 0;
|
||||
|
||||
_err:
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]);
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&fileGroup.files[type]);
|
||||
|
||||
tfree(tDataDir);
|
||||
if (dir != NULL) closedir(dir);
|
||||
|
@ -139,13 +143,13 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
|
|||
|
||||
for (int i = 0; i < pFileH->nFGroups; i++) {
|
||||
SFileGroup *pFGroup = pFileH->pFGroup + i;
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
tsdbDestroyFile(&pFGroup->files[type]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int maxTables) {
|
||||
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
|
||||
if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL;
|
||||
|
@ -156,7 +160,7 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int
|
|||
SFileGroup *pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
|
||||
if (pGroup == NULL) { // if not exists, create one
|
||||
pFGroup->fileId = fid;
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
if (tsdbCreateFile(&pFGroup->files[type], pRepo, fid, type) < 0)
|
||||
goto _err;
|
||||
}
|
||||
|
@ -169,7 +173,7 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int
|
|||
return pGroup;
|
||||
|
||||
_err:
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&pGroup->files[type]);
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) tsdbDestroyFile(&pGroup->files[type]);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -260,6 +264,7 @@ int tsdbCreateFile(SFile *pFile, STsdbRepo *pRepo, int fid, int type) {
|
|||
}
|
||||
|
||||
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
||||
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
||||
|
||||
if (tsdbUpdateFileHeader(pFile, 0) < 0) {
|
||||
tsdbCloseFile(pFile);
|
||||
|
@@ -323,23 +328,25 @@ int tsdbUpdateFileHeader(SFile *pFile, uint32_t version) {

int tsdbEncodeSFileInfo(void **buf, const STsdbFileInfo *pInfo) {
  int tlen = 0;
  tlen += taosEncodeFixedU32(buf, pInfo->offset);
  tlen += taosEncodeFixedU32(buf, pInfo->magic);
  tlen += taosEncodeFixedU32(buf, pInfo->len);
  tlen += taosEncodeFixedU64(buf, pInfo->size);
  tlen += taosEncodeFixedU64(buf, pInfo->tombSize);
  tlen += taosEncodeFixedU32(buf, pInfo->totalBlocks);
  tlen += taosEncodeFixedU32(buf, pInfo->totalSubBlocks);
  tlen += taosEncodeFixedU32(buf, pInfo->offset);
  tlen += taosEncodeFixedU64(buf, pInfo->size);
  tlen += taosEncodeFixedU64(buf, pInfo->tombSize);

  return tlen;
}

void *tsdbDecodeSFileInfo(void *buf, STsdbFileInfo *pInfo) {
  buf = taosDecodeFixedU32(buf, &(pInfo->offset));
  buf = taosDecodeFixedU32(buf, &(pInfo->magic));
  buf = taosDecodeFixedU32(buf, &(pInfo->len));
  buf = taosDecodeFixedU64(buf, &(pInfo->size));
  buf = taosDecodeFixedU64(buf, &(pInfo->tombSize));
  buf = taosDecodeFixedU32(buf, &(pInfo->totalBlocks));
  buf = taosDecodeFixedU32(buf, &(pInfo->totalSubBlocks));
  buf = taosDecodeFixedU32(buf, &(pInfo->offset));
  buf = taosDecodeFixedU64(buf, &(pInfo->size));
  buf = taosDecodeFixedU64(buf, &(pInfo->tombSize));

  return buf;
}

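The encoder and decoder above must walk the STsdbFileInfo fields in exactly the same order, which is why both sides gain the new magic/len/totalBlocks/totalSubBlocks fields in lockstep. A self-contained round-trip sketch of that symmetry, with simplified stand-ins for the taosEncodeFixed and taosDecodeFixed helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int   encU32(void **buf, uint32_t v) { memcpy(*buf, &v, 4); *buf = (char *)*buf + 4; return 4; }
static int   encU64(void **buf, uint64_t v) { memcpy(*buf, &v, 8); *buf = (char *)*buf + 8; return 8; }
static void *decU32(void *buf, uint32_t *v) { memcpy(v, buf, 4); return (char *)buf + 4; }
static void *decU64(void *buf, uint64_t *v) { memcpy(v, buf, 8); return (char *)buf + 8; }

typedef struct { uint32_t offset, magic, len; uint64_t size, tombSize; } DemoFileInfo;

static int encodeInfo(void **buf, const DemoFileInfo *p) {
  int tlen = 0;
  tlen += encU32(buf, p->offset);
  tlen += encU32(buf, p->magic);
  tlen += encU32(buf, p->len);
  tlen += encU64(buf, p->size);
  tlen += encU64(buf, p->tombSize);
  return tlen;
}

static void *decodeInfo(void *buf, DemoFileInfo *p) {
  /* same field order as encodeInfo, or old files stop round-tripping */
  buf = decU32(buf, &p->offset);
  buf = decU32(buf, &p->magic);
  buf = decU32(buf, &p->len);
  buf = decU64(buf, &p->size);
  buf = decU64(buf, &p->tombSize);
  return buf;
}

int main(void) {
  char mem[64];
  void *w = mem;
  DemoFileInfo in = {512, 0xF00AFA0F, 16, 1024, 0}, out;
  int n = encodeInfo(&w, &in);
  decodeInfo(mem, &out);
  printf("encoded %d bytes, magic ok:%d size ok:%d\n", n, out.magic == in.magic, (int)(out.size == in.size));
  return 0;
}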
@ -358,7 +365,7 @@ void tsdbRemoveFileGroup(STsdbRepo *pRepo, SFileGroup *pFGroup) {
|
|||
pFileH->nFGroups--;
|
||||
ASSERT(pFileH->nFGroups >= 0);
|
||||
|
||||
for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
for (int type = 0; type < TSDB_FILE_TYPE_MAX; type++) {
|
||||
if (remove(fileGroup.files[type].fname) < 0) {
|
||||
tsdbError("vgId:%d failed to remove file %s", REPO_ID(pRepo), fileGroup.files[type].fname);
|
||||
}
|
||||
|
|
|
@ -62,7 +62,6 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo);
|
|||
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
|
||||
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
|
||||
static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
|
||||
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
|
||||
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
|
||||
static int keyFGroupCompFunc(const void *key, const void *fgroup);
|
||||
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
|
||||
|
@ -85,10 +84,10 @@ int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
|
|||
if (tsdbSetRepoEnv(rootDir, pCfg) < 0) return -1;
|
||||
|
||||
tsdbDebug(
|
||||
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d maxTables %d daysPerFile %d keep "
|
||||
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d daysPerFile %d keep "
|
||||
"%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d",
|
||||
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->maxTables, pCfg->daysPerFile, pCfg->keep,
|
||||
pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
|
||||
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock,
|
||||
pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -212,59 +211,61 @@ uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_
|
|||
|
||||
char *sdup = strdup(pRepo->rootDir);
|
||||
char *prefix = dirname(sdup);
|
||||
int prefixLen = strlen(prefix);
|
||||
tfree(sdup);
|
||||
|
||||
if (name[0] == 0) { // get the file from index or after, but not larger than eindex
|
||||
int fid = (*index) / 3;
|
||||
int fid = (*index) / TSDB_FILE_TYPE_MAX;
|
||||
|
||||
if (pFileH->nFGroups == 0 || fid > pFileH->pFGroup[pFileH->nFGroups - 1].fileId) {
|
||||
if (*index <= TSDB_META_FILE_INDEX && TSDB_META_FILE_INDEX <= eindex) {
|
||||
fname = tsdbGetMetaFileName(pRepo->rootDir);
|
||||
*index = TSDB_META_FILE_INDEX;
|
||||
magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta);
|
||||
} else {
|
||||
tfree(sdup);
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
SFileGroup *pFGroup =
|
||||
taosbsearch(&fid, pFileH->pFGroup, pFileH->nFGroups, sizeof(SFileGroup), keyFGroupCompFunc, TD_GE);
|
||||
if (pFGroup->fileId == fid) {
|
||||
fname = strdup(pFGroup->files[(*index) % 3].fname);
|
||||
fname = strdup(pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].fname);
|
||||
magic = pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX].info.magic;
|
||||
} else {
|
||||
if (pFGroup->fileId * 3 + 2 < eindex) {
|
||||
if ((pFGroup->fileId + 1) * TSDB_FILE_TYPE_MAX - 1 < eindex) {
|
||||
fname = strdup(pFGroup->files[0].fname);
|
||||
*index = pFGroup->fileId * 3;
|
||||
*index = pFGroup->fileId * TSDB_FILE_TYPE_MAX;
|
||||
magic = pFGroup->files[0].info.magic;
|
||||
} else {
|
||||
tfree(sdup);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
strcpy(name, fname + strlen(prefix));
|
||||
strcpy(name, fname + prefixLen);
|
||||
} else { // get the named file at the specified index. If not there, return 0
|
||||
if (*index == TSDB_META_FILE_INDEX) { // get meta file
|
||||
fname = tsdbGetMetaFileName(pRepo->rootDir);
|
||||
magic = TSDB_META_FILE_MAGIC(pRepo->tsdbMeta);
|
||||
} else {
|
||||
int fid = (*index) / 3;
|
||||
int fid = (*index) / TSDB_FILE_TYPE_MAX;
|
||||
SFileGroup *pFGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
|
||||
if (pFGroup == NULL) { // not found
|
||||
tfree(sdup);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SFile *pFile = &pFGroup->files[(*index) % 3];
|
||||
SFile *pFile = &pFGroup->files[(*index) % TSDB_FILE_TYPE_MAX];
|
||||
fname = strdup(pFile->fname);
|
||||
magic = pFile->info.magic;
|
||||
}
|
||||
}
|
||||
|
||||
if (stat(fname, &fState) < 0) {
|
||||
tfree(sdup);
|
||||
tfree(fname);
|
||||
return 0;
|
||||
}
|
||||
|
||||
tfree(sdup);
|
||||
*size = fState.st_size;
|
||||
magic = *size;
|
||||
// magic = *size;
|
||||
|
||||
tfree(fname);
|
||||
return magic;
|
||||
|
@ -305,13 +306,6 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) {
|
|||
tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
|
||||
configChanged = true;
|
||||
}
|
||||
if (pRCfg->maxTables != pCfg->maxTables) {
|
||||
if (tsdbAlterMaxTables(pRepo, pCfg->maxTables) < 0) {
|
||||
tsdbError("vgId:%d failed to configure repo when alter maxTables since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
configChanged = true;
|
||||
}
|
||||
|
||||
if (configChanged) {
|
||||
if (tsdbSaveConfig(pRepo->rootDir, &pRepo->config) < 0) {
|
||||
|
@@ -383,6 +377,18 @@ char *tsdbGetDataDirName(char *rootDir) {
  return fname;
}

int tsdbGetNextMaxTables(int tid) {
  ASSERT(tid >= 1 && tid <= TSDB_MAX_TABLES);
  int maxTables = TSDB_INIT_NTABLES;
  while (true) {
    maxTables = MIN(maxTables, TSDB_MAX_TABLES);
    if (tid <= maxTables) break;
    maxTables *= 2;
  }

  return maxTables + 1;
}

STsdbMeta *    tsdbGetMeta(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbMeta; }
STsdbFileH *   tsdbGetFile(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbFileH; }
STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo) { return NULL; }

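tsdbGetNextMaxTables grows the table-slot capacity geometrically: start at TSDB_INIT_NTABLES, double until it covers the requested tid, cap at TSDB_MAX_TABLES, and return one extra slot, presumably because table ids start at 1. A quick stand-alone check of that growth rule (the constants below are assumed stand-ins, not the real values):

#include <stdio.h>

#define DEMO_INIT_NTABLES 1024       /* stand-in for TSDB_INIT_NTABLES */
#define DEMO_MAX_TABLES   200000     /* stand-in for TSDB_MAX_TABLES   */
#define DEMO_MIN(a, b)    ((a) < (b) ? (a) : (b))

/* same shape as tsdbGetNextMaxTables(): double until tid fits, cap, add one slot */
static int nextMaxTables(int tid) {
  int maxTables = DEMO_INIT_NTABLES;
  while (1) {
    maxTables = DEMO_MIN(maxTables, DEMO_MAX_TABLES);
    if (tid <= maxTables) break;
    maxTables *= 2;
  }
  return maxTables + 1;
}

int main(void) {
  int tids[] = {1, 1024, 1025, 5000, 200000};
  for (int i = 0; i < 5; i++)
    printf("tid=%d -> capacity=%d\n", tids[i], nextMaxTables(tids[i]));
  return 0;
}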
@ -415,17 +421,6 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
// Check maxTables
|
||||
if (pCfg->maxTables == -1) {
|
||||
pCfg->maxTables = TSDB_DEFAULT_TABLES+1;
|
||||
} else {
|
||||
if (pCfg->maxTables - 1 < TSDB_MIN_TABLES || pCfg->maxTables - 1 > TSDB_MAX_TABLES) {
|
||||
tsdbError("vgId:%d invalid maxTables configuration! maxTables %d TSDB_MIN_TABLES %d TSDB_MAX_TABLES %d",
|
||||
pCfg->tsdbId, pCfg->maxTables - 1, TSDB_MIN_TABLES, TSDB_MAX_TABLES);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
// Check daysPerFile
|
||||
if (pCfg->daysPerFile == -1) {
|
||||
pCfg->daysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
|
||||
|
@ -711,6 +706,7 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
|
|||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
int64_t points = 0;
|
||||
|
||||
ASSERT(pBlock->tid < pMeta->maxTables);
|
||||
STable *pTable = pMeta->tables[pBlock->tid];
|
||||
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
|
||||
|
||||
|
@ -777,7 +773,6 @@ static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
|
|||
}
|
||||
|
||||
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
|
||||
// TODO
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
SFileGroup *pFGroup = NULL;
|
||||
|
@ -790,10 +785,11 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
|
|||
tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC);
|
||||
while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) {
|
||||
if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err;
|
||||
for (int i = 1; i < pRepo->config.maxTables; i++) {
|
||||
for (int i = 1; i < pMeta->maxTables; i++) {
|
||||
STable *pTable = pMeta->tables[i];
|
||||
if (pTable == NULL) continue;
|
||||
SCompIdx *pIdx = &rhelper.pCompIdx[i];
|
||||
tsdbSetHelperTable(&rhelper, pTable, pRepo);
|
||||
SCompIdx *pIdx = &(rhelper.curCompIdx);
|
||||
|
||||
if (pIdx->offset > 0 && pTable->lastKey < pIdx->maxKey) pTable->lastKey = pIdx->maxKey;
|
||||
}
|
||||
|
@ -865,36 +861,6 @@ static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
|
||||
// TODO
|
||||
int oldMaxTables = pRepo->config.maxTables;
|
||||
if (oldMaxTables < pRepo->config.maxTables) {
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *));
|
||||
memset(&pMeta->tables[oldMaxTables], 0, sizeof(STable *) * (maxTables - oldMaxTables));
|
||||
pRepo->config.maxTables = maxTables;
|
||||
|
||||
if (pRepo->mem) {
|
||||
pRepo->mem->tData = realloc(pRepo->mem->tData, maxTables * sizeof(STableData *));
|
||||
memset(POINTER_SHIFT(pRepo->mem->tData, sizeof(STableData *) * oldMaxTables), 0,
|
||||
sizeof(STableData *) * (maxTables - oldMaxTables));
|
||||
}
|
||||
|
||||
if (pRepo->imem) {
|
||||
pRepo->imem->tData = realloc(pRepo->imem->tData, maxTables * sizeof(STableData *));
|
||||
memset(POINTER_SHIFT(pRepo->imem->tData, sizeof(STableData *) * oldMaxTables), 0,
|
||||
sizeof(STableData *) * (maxTables - oldMaxTables));
|
||||
}
|
||||
|
||||
tsdbDebug("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int keyFGroupCompFunc(const void *key, const void *fgroup) {
|
||||
int fid = *(int *)key;
|
||||
SFileGroup *pFGroup = (SFileGroup *)fgroup;
|
||||
|
@ -911,7 +877,6 @@ static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) {
|
|||
tlen += taosEncodeVariantI32(buf, pCfg->tsdbId);
|
||||
tlen += taosEncodeFixedI32(buf, pCfg->cacheBlockSize);
|
||||
tlen += taosEncodeVariantI32(buf, pCfg->totalBlocks);
|
||||
tlen += taosEncodeVariantI32(buf, pCfg->maxTables);
|
||||
tlen += taosEncodeVariantI32(buf, pCfg->daysPerFile);
|
||||
tlen += taosEncodeVariantI32(buf, pCfg->keep);
|
||||
tlen += taosEncodeVariantI32(buf, pCfg->keep1);
|
||||
|
@ -928,7 +893,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
|
|||
buf = taosDecodeVariantI32(buf, &(pCfg->tsdbId));
|
||||
buf = taosDecodeFixedI32(buf, &(pCfg->cacheBlockSize));
|
||||
buf = taosDecodeVariantI32(buf, &(pCfg->totalBlocks));
|
||||
buf = taosDecodeVariantI32(buf, &(pCfg->maxTables));
|
||||
buf = taosDecodeVariantI32(buf, &(pCfg->daysPerFile));
|
||||
buf = taosDecodeVariantI32(buf, &(pCfg->keep));
|
||||
buf = taosDecodeVariantI32(buf, &(pCfg->keep1));
|
||||
|
@ -1034,7 +998,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
|
|||
pBlock->schemaLen = htonl(pBlock->schemaLen);
|
||||
pBlock->numOfRows = htons(pBlock->numOfRows);
|
||||
|
||||
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
|
||||
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
|
||||
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
|
||||
pBlock->tid);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
|
@ -1117,7 +1081,7 @@ TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid) {
|
|||
static void tsdbStartStream(STsdbRepo *pRepo) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
for (int i = 0; i < pRepo->config.maxTables; i++) {
|
||||
for (int i = 0; i < pMeta->maxTables; i++) {
|
||||
STable *pTable = pMeta->tables[i];
|
||||
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
|
||||
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
|
||||
|
@ -1130,7 +1094,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
|
|||
static void tsdbStopStream(STsdbRepo *pRepo) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
for (int i = 0; i < pRepo->config.maxTables; i++) {
|
||||
for (int i = 0; i < pMeta->maxTables; i++) {
|
||||
STable *pTable = pMeta->tables[i];
|
||||
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
|
||||
(*pRepo->appH.cqDropFunc)(pTable->cqhandle);
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
static FORCE_INLINE STsdbBufBlock *tsdbGetCurrBufBlock(STsdbRepo *pRepo);
|
||||
|
||||
static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes);
|
||||
static SMemTable * tsdbNewMemTable(STsdbCfg *pCfg);
|
||||
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
|
||||
static void tsdbFreeMemTable(SMemTable *pMemTable);
|
||||
static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
|
||||
static void tsdbFreeTableData(STableData *pTableData);
|
||||
|
@ -33,10 +33,12 @@ static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY min
|
|||
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
|
||||
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
|
||||
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
|
||||
|
||||
// ---------------- INTERNAL FUNCTIONS ----------------
|
||||
int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
int32_t level = 0;
|
||||
int32_t headSize = 0;
|
||||
TSKEY key = dataRowKey(row);
|
||||
|
@ -45,7 +47,7 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
|
|||
SSkipList * pSList = NULL;
|
||||
int bytes = 0;
|
||||
|
||||
if (pMemTable != NULL && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
|
||||
if (pMemTable != NULL && TABLE_TID(pTable) < pMemTable->maxTables && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
|
||||
pMemTable->tData[TABLE_TID(pTable)]->uid == TABLE_UID(pTable)) {
|
||||
pTableData = pMemTable->tData[TABLE_TID(pTable)];
|
||||
pSList = pTableData->pData;
|
||||
|
@ -66,13 +68,20 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
|
|||
// Operations above may change pRepo->mem, retake those values
|
||||
ASSERT(pRepo->mem != NULL);
|
||||
pMemTable = pRepo->mem;
|
||||
|
||||
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
|
||||
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) return -1;;
|
||||
}
|
||||
pTableData = pMemTable->tData[TABLE_TID(pTable)];
|
||||
|
||||
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
|
||||
if (pTableData != NULL) { // destroy the table skiplist (may have race condition problem)
|
||||
taosWLockLatch(&(pMemTable->latch));
|
||||
pMemTable->tData[TABLE_TID(pTable)] = NULL;
|
||||
tsdbFreeTableData(pTableData);
|
||||
taosWUnLockLatch(&(pMemTable->latch));
|
||||
}
|
||||
|
||||
pTableData = tsdbNewTableData(pCfg, pTable);
|
||||
if (pTableData == NULL) {
|
||||
tsdbError("vgId:%d failed to insert row with key %" PRId64
|
||||
|
@ -122,7 +131,6 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
|||
int ref = T_REF_DEC(pMemTable);
|
||||
tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
|
||||
if (ref == 0) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbBufPool *pBufPool = pRepo->pPool;
|
||||
|
||||
SListNode *pNode = NULL;
|
||||
|
@ -139,7 +147,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
|||
}
|
||||
if (tsdbUnlockRepo(pRepo) < 0) return -1;
|
||||
|
||||
for (int i = 0; i < pCfg->maxTables; i++) {
|
||||
for (int i = 0; i < pMemTable->maxTables; i++) {
|
||||
if (pMemTable->tData[i] != NULL) {
|
||||
tsdbFreeTableData(pMemTable->tData[i]);
|
||||
}
|
||||
|
@ -161,11 +169,24 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
|
|||
tsdbRefMemTable(pRepo, *pIMem);
|
||||
|
||||
if (tsdbUnlockRepo(pRepo) < 0) return -1;
|
||||
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
|
||||
|
||||
if (*pMem != NULL) taosRLockLatch(&((*pMem)->latch));
|
||||
|
||||
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) {
|
||||
if (pMem != NULL) {
|
||||
taosRUnLockLatch(&(pMem->latch));
|
||||
tsdbUnRefMemTable(pRepo, pMem);
|
||||
}
|
||||
|
||||
if (pIMem != NULL) {
|
||||
tsdbUnRefMemTable(pRepo, pIMem);
|
||||
}
|
||||
}
|
||||
|
||||
void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
|
||||
|
@ -182,7 +203,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
|||
}
|
||||
|
||||
if (pRepo->mem == NULL) {
|
||||
SMemTable *pMemTable = tsdbNewMemTable(&pRepo->config);
|
||||
SMemTable *pMemTable = tsdbNewMemTable(pRepo);
|
||||
if (pMemTable == NULL) return NULL;
|
||||
|
||||
if (tsdbLockRepo(pRepo) < 0) {
|
||||
|
@ -264,13 +285,11 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
|
|||
}
|
||||
|
||||
do {
|
||||
if (numOfRows >= maxRowsToRead) break;
|
||||
|
||||
SDataRow row = tsdbNextIterRow(pIter);
|
||||
if (row == NULL) break;
|
||||
|
||||
keyNext = dataRowKey(row);
|
||||
if (keyNext < 0 || keyNext > maxKey) break;
|
||||
if (keyNext > maxKey) break;
|
||||
|
||||
bool keyFiltered = false;
|
||||
if (nFilterKeys != 0) {
|
||||
|
@ -289,6 +308,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
|
|||
}
|
||||
|
||||
if (!keyFiltered) {
|
||||
if (numOfRows >= maxRowsToRead) break;
|
||||
if (pCols) {
|
||||
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
|
||||
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row));
|
||||
|
@ -330,7 +350,9 @@ static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) {
|
|||
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
|
||||
}
|
||||
|
||||
static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
|
||||
static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
SMemTable *pMemTable = (SMemTable *)calloc(1, sizeof(*pMemTable));
|
||||
if (pMemTable == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
|
@ -341,7 +363,8 @@ static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
|
|||
pMemTable->keyLast = 0;
|
||||
pMemTable->numOfRows = 0;
|
||||
|
||||
pMemTable->tData = (STableData**)calloc(pCfg->maxTables, sizeof(STableData*));
|
||||
pMemTable->maxTables = pMeta->maxTables;
|
||||
pMemTable->tData = (STableData **)calloc(pMemTable->maxTables, sizeof(STableData *));
|
||||
if (pMemTable->tData == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
|
@ -399,9 +422,6 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
// TODO: operation here should not be here, remove it
|
||||
pTableData->pData->level = 1;
|
||||
|
||||
return pTableData;
|
||||
|
||||
_err:
|
||||
|
@ -474,7 +494,7 @@ static void *tsdbCommitData(void *arg) {
|
|||
|
||||
_exit:
|
||||
tdFreeDataCols(pDataCols);
|
||||
tsdbDestroyCommitIters(iters, pCfg->maxTables);
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
tsdbDestroyHelper(&whelper);
|
||||
tsdbEndCommit(pRepo);
|
||||
tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId);
|
||||
|
@ -553,12 +573,13 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
  STsdbCfg * pCfg = &pRepo->config;
  STsdbFileH *pFileH = pRepo->tsdbFileH;
  SFileGroup *pGroup = NULL;
  SMemTable * pMem = pRepo->imem;

  TSKEY minKey = 0, maxKey = 0;
  tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);

  // Check if there are data to commit to this file
  int hasDataToCommit = tsdbHasDataToCommit(iters, pCfg->maxTables, minKey, maxKey);
  int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
  if (!hasDataToCommit) {
    tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
    return 0;
@ -571,7 +592,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
    return -1;
  }

  if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid, pCfg->maxTables)) == NULL) {
  if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
    tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
    goto _err;
  }
@ -583,7 +604,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
  }

  // Loop to commit data in each table
  for (int tid = 1; tid < pCfg->maxTables; tid++) {
  for (int tid = 1; tid < pMem->maxTables; tid++) {
    SCommitIter *pIter = iters + tid;
    if (pIter->pTable == NULL) continue;
@ -627,9 +648,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
|
|||
tsdbCloseHelperFile(pHelper, 0);
|
||||
|
||||
pthread_rwlock_wrlock(&(pFileH->fhlock));
|
||||
pGroup->files[TSDB_FILE_TYPE_HEAD] = pHelper->files.headF;
|
||||
pGroup->files[TSDB_FILE_TYPE_DATA] = pHelper->files.dataF;
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST] = pHelper->files.lastF;
|
||||
#ifdef TSDB_IDX
|
||||
pGroup->files[TSDB_FILE_TYPE_IDX] = *(helperIdxF(pHelper));
|
||||
#endif
|
||||
pGroup->files[TSDB_FILE_TYPE_HEAD] = *(helperHeadF(pHelper));
|
||||
pGroup->files[TSDB_FILE_TYPE_DATA] = *(helperDataF(pHelper));
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST] = *(helperLastF(pHelper));
|
||||
pthread_rwlock_unlock(&(pFileH->fhlock));
|
||||
|
||||
return 0;
|
||||
|
@ -641,11 +665,10 @@ _err:
|
|||
}
|
||||
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
SMemTable *pMem = pRepo->imem;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
SCommitIter *iters = (SCommitIter *)calloc(pCfg->maxTables, sizeof(SCommitIter));
|
||||
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
|
||||
if (iters == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
|
@ -654,7 +677,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
|||
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
// reference all tables
|
||||
for (int i = 0; i < pCfg->maxTables; i++) {
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if (pMeta->tables[i] != NULL) {
|
||||
tsdbRefTable(pMeta->tables[i]);
|
||||
iters[i].pTable = pMeta->tables[i];
|
||||
|
@ -663,7 +686,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
|||
|
||||
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
for (int i = 0; i < pCfg->maxTables; i++) {
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
|
||||
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
|
@ -677,7 +700,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
|||
return iters;
|
||||
|
||||
_err:
|
||||
tsdbDestroyCommitIters(iters, pCfg->maxTables);
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -693,3 +716,25 @@ static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {

  free(iters);
}

static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
  ASSERT(pMemTable->maxTables < maxTables);

  STableData **pTableData = (STableData **)calloc(maxTables, sizeof(STableData *));
  if (pTableData == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    return -1;
  }
  memcpy((void *)pTableData, (void *)pMemTable->tData, sizeof(STableData *) * pMemTable->maxTables);

  STableData **tData = pMemTable->tData;

  taosWLockLatch(&(pMemTable->latch));
  pMemTable->maxTables = maxTables;
  pMemTable->tData = pTableData;
  taosWUnLockLatch(&(pMemTable->latch));

  tfree(tData);

  return 0;
}
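The function above swaps in the larger tData array only while holding the write latch and frees the old array only after the swap. A minimal reader-side sketch of how that pairing is meant to be used follows; the read-latch helpers and the function name are assumptions for illustration, not part of this patch.

static STableData *memGetTableDataSketch(SMemTable *pMemTable, int tid) {
  STableData *pTableData = NULL;

  taosRLockLatch(&(pMemTable->latch));   // assumed read-side counterpart of taosWLockLatch
  if (tid < pMemTable->maxTables) {      // bound check against the snapshot seen under the latch
    pTableData = pMemTable->tData[tid];  // the array cannot be swapped out from under us here
  }
  taosRUnLockLatch(&(pMemTable->latch));

  return pTableData;
}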
@ -49,6 +49,7 @@ static int tsdbGetTableEncodeSize(int8_t act, STable *pTable);
static void *  tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable *pTable);
static int     tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int     tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int     tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);

// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
@ -60,13 +61,13 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
  int      tid = pCfg->tableId.tid;
  STable * pTable = NULL;

  if (tid < 0 || tid >= pRepo->config.maxTables) {
  if (tid < 1 || tid > TSDB_MAX_TABLES) {
    tsdbError("vgId:%d failed to create table since invalid tid %d", REPO_ID(pRepo), tid);
    terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
    goto _err;
  }

  if (pMeta->tables[tid] != NULL) {
  if (tid < pMeta->maxTables && pMeta->tables[tid] != NULL) {
    if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
      tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
                TABLE_TID(pTable), TABLE_UID(pTable));
@ -123,7 +124,10 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
|
|||
int tlen2 = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table);
|
||||
int tlen = tlen1 + tlen2;
|
||||
void *buf = tsdbAllocBytes(pRepo, tlen);
|
||||
ASSERT(buf != NULL);
|
||||
if (buf == NULL) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (newSuper) {
|
||||
void *pBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, super);
|
||||
ASSERT(POINTER_DISTANCE(pBuf, buf) == tlen1);
|
||||
|
@ -419,7 +423,8 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
pMeta->tables = (STable **)calloc(pCfg->maxTables, sizeof(STable *));
|
||||
pMeta->maxTables = TSDB_INIT_NTABLES + 1;
|
||||
pMeta->tables = (STable **)calloc(pMeta->maxTables, sizeof(STable *));
|
||||
if (pMeta->tables == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
|
@ -431,7 +436,7 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
pMeta->uidMap = taosHashInit(pCfg->maxTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
|
||||
pMeta->uidMap = taosHashInit(TSDB_INIT_NTABLES * 1.1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
|
||||
if (pMeta->uidMap == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
|
@ -481,14 +486,13 @@ _err:
|
|||
}
|
||||
|
||||
int tsdbCloseMeta(STsdbRepo *pRepo) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
SListNode *pNode = NULL;
|
||||
STable * pTable = NULL;
|
||||
|
||||
if (pMeta == NULL) return 0;
|
||||
tdCloseKVStore(pMeta->pStore);
|
||||
for (int i = 1; i < pCfg->maxTables; i++) {
|
||||
for (int i = 1; i < pMeta->maxTables; i++) {
|
||||
tsdbFreeTable(pMeta->tables[i]);
|
||||
}
|
||||
|
||||
|
@ -621,9 +625,8 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
|
|||
static void tsdbOrgMeta(void *pHandle) {
|
||||
STsdbRepo *pRepo = (STsdbRepo *)pHandle;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
|
||||
for (int i = 1; i < pCfg->maxTables; i++) {
|
||||
for (int i = 1; i < pMeta->maxTables; i++) {
|
||||
STable *pTable = pMeta->tables[i];
|
||||
if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) {
|
||||
tsdbAddTableIntoIndex(pMeta, pTable, true);
|
||||
|
@ -778,6 +781,9 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
|
|||
goto _err;
|
||||
}
|
||||
} else {
|
||||
if (TABLE_TID(pTable) >= pMeta->maxTables) {
|
||||
if (tsdbAdjustMetaTables(pRepo, TABLE_TID(pTable)) < 0) goto _err;
|
||||
}
|
||||
if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index
|
||||
if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) {
|
||||
tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo),
|
||||
|
@ -785,6 +791,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
|
|||
goto _err;
|
||||
}
|
||||
}
|
||||
ASSERT(TABLE_TID(pTable) < pMeta->maxTables);
|
||||
pMeta->tables[TABLE_TID(pTable)] = pTable;
|
||||
pMeta->nTables++;
|
||||
}
|
||||
|
@ -824,7 +831,6 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
|
|||
SListIter lIter = {0};
|
||||
SListNode *pNode = NULL;
|
||||
STable * tTable = NULL;
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
|
||||
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
|
||||
int maxCols = schemaNCols(pSchema);
|
||||
|
@ -857,7 +863,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
|
|||
if (maxCols == pMeta->maxCols || maxRowBytes == pMeta->maxRowBytes) {
|
||||
maxCols = 0;
|
||||
maxRowBytes = 0;
|
||||
for (int i = 0; i < pCfg->maxTables; i++) {
|
||||
for (int i = 0; i < pMeta->maxTables; i++) {
|
||||
STable *pTable = pMeta->tables[i];
|
||||
if (pTable != NULL) {
|
||||
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
|
||||
|
@ -1212,7 +1218,9 @@ static void *tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable
|
|||
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
|
||||
int tlen = tsdbGetTableEncodeSize(TSDB_DROP_META, pTable);
|
||||
void *buf = tsdbAllocBytes(pRepo, tlen);
|
||||
ASSERT(buf != NULL);
|
||||
if (buf == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
void *pBuf = buf;
|
||||
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
|
||||
|
@ -1265,3 +1273,26 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {

  return 0;
}

static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) {
  STsdbMeta *pMeta = pRepo->tsdbMeta;
  ASSERT(tid >= pMeta->maxTables);

  int maxTables = tsdbGetNextMaxTables(tid);

  STable **tables = (STable **)calloc(maxTables, sizeof(STable *));
  if (tables == NULL) {
    terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
    return -1;
  }

  memcpy((void *)tables, (void *)pMeta->tables, sizeof(STable *) * pMeta->maxTables);
  pMeta->maxTables = maxTables;

  STable **tTables = pMeta->tables;
  pMeta->tables = tables;
  tfree(tTables);
  tsdbDebug("vgId:%d tsdb meta maxTables is adjusted as %d", REPO_ID(pRepo), maxTables);

  return 0;
}
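The growth step itself comes from tsdbGetNextMaxTables(), which is not shown in this diff. A plausible sketch of such a policy is given below purely for illustration: the doubling strategy is an assumption, only TSDB_INIT_NTABLES and the extra slot for the unused tid 0 are taken from the hunks above and below.

static int hypoNextMaxTables(int tid) {
  int maxTables = TSDB_INIT_NTABLES;
  while (maxTables <= tid) {
    maxTables *= 2;        // grow geometrically so repeated adjustments stay cheap (assumption)
  }
  return maxTables + 1;    // tid 0 is never used, so keep one extra slot
}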
@ -99,6 +99,7 @@ void tsdbResetHelper(SRWHelper *pHelper) {
|
|||
|
||||
int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
|
||||
ASSERT(pHelper != NULL && pGroup != NULL);
|
||||
SFile *pFile = NULL;
|
||||
|
||||
// Clear the helper object
|
||||
tsdbResetHelper(pHelper);
|
||||
|
@ -106,44 +107,52 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
|
|||
ASSERT(pHelper->state == TSDB_HELPER_CLEAR_STATE);
|
||||
|
||||
// Set the files
|
||||
pHelper->files.fid = pGroup->fileId;
|
||||
pHelper->files.headF = pGroup->files[TSDB_FILE_TYPE_HEAD];
|
||||
pHelper->files.dataF = pGroup->files[TSDB_FILE_TYPE_DATA];
|
||||
pHelper->files.lastF = pGroup->files[TSDB_FILE_TYPE_LAST];
|
||||
pHelper->files.fGroup = *pGroup;
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
||||
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, pHelper->files.nHeadF.fname);
|
||||
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, pHelper->files.nLastF.fname);
|
||||
#ifdef TSDB_IDX
|
||||
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NIDX, helperNewIdxF(pHelper)->fname);
|
||||
#endif
|
||||
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NHEAD, helperNewHeadF(pHelper)->fname);
|
||||
tsdbGetDataFileName(pHelper->pRepo, pGroup->fileId, TSDB_FILE_TYPE_NLAST, helperNewLastF(pHelper)->fname);
|
||||
}
|
||||
|
||||
// Open the files
|
||||
if (tsdbOpenFile(&(pHelper->files.headF), O_RDONLY) < 0) goto _err;
|
||||
#ifdef TSDB_IDX
|
||||
if (tsdbOpenFile(helperIdxF(pHelper), O_RDONLY) < 0) goto _err;
|
||||
#endif
|
||||
if (tsdbOpenFile(helperHeadF(pHelper), O_RDONLY) < 0) goto _err;
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
||||
if (tsdbOpenFile(&(pHelper->files.dataF), O_RDWR) < 0) goto _err;
|
||||
if (tsdbOpenFile(&(pHelper->files.lastF), O_RDWR) < 0) goto _err;
|
||||
if (tsdbOpenFile(helperDataF(pHelper), O_RDWR) < 0) goto _err;
|
||||
if (tsdbOpenFile(helperLastF(pHelper), O_RDWR) < 0) goto _err;
|
||||
|
||||
#ifdef TSDB_IDX
|
||||
// Create and open .i file
|
||||
pFile = helperNewIdxF(pHelper);
|
||||
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1;
|
||||
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
||||
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
||||
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
|
||||
#endif
|
||||
|
||||
// Create and open .h
|
||||
if (tsdbOpenFile(&(pHelper->files.nHeadF), O_WRONLY | O_CREAT) < 0) return -1;
|
||||
// size_t tsize = TSDB_FILE_HEAD_SIZE + sizeof(SCompIdx) * pCfg->maxTables + sizeof(TSCKSUM);
|
||||
if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
|
||||
tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
|
||||
TSDB_FILE_HEAD_SIZE, pHelper->files.headF.fname, pHelper->files.nHeadF.fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _err;
|
||||
}
|
||||
pFile = helperNewHeadF(pHelper);
|
||||
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) return -1;
|
||||
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
||||
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
||||
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
|
||||
|
||||
// Create and open .l file if should
|
||||
if (tsdbShouldCreateNewLast(pHelper)) {
|
||||
if (tsdbOpenFile(&(pHelper->files.nLastF), O_WRONLY | O_CREAT) < 0) goto _err;
|
||||
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, TSDB_FILE_HEAD_SIZE) < TSDB_FILE_HEAD_SIZE) {
|
||||
tsdbError("vgId:%d failed to sendfile %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo),
|
||||
TSDB_FILE_HEAD_SIZE, pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _err;
|
||||
}
|
||||
pFile = helperNewLastF(pHelper);
|
||||
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
|
||||
pFile->info.size = TSDB_FILE_HEAD_SIZE;
|
||||
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
|
||||
pFile->info.len = 0;
|
||||
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
|
||||
}
|
||||
} else {
|
||||
if (tsdbOpenFile(&(pHelper->files.dataF), O_RDONLY) < 0) goto _err;
|
||||
if (tsdbOpenFile(&(pHelper->files.lastF), O_RDONLY) < 0) goto _err;
|
||||
if (tsdbOpenFile(helperDataF(pHelper), O_RDONLY) < 0) goto _err;
|
||||
if (tsdbOpenFile(helperLastF(pHelper), O_RDONLY) < 0) goto _err;
|
||||
}
|
||||
|
||||
helperSetState(pHelper, TSDB_HELPER_FILE_SET_AND_OPEN);
|
||||
|
@ -155,59 +164,98 @@ _err:
|
|||
}
|
||||
|
||||
int tsdbCloseHelperFile(SRWHelper *pHelper, bool hasError) {
|
||||
if (pHelper->files.headF.fd > 0) {
|
||||
close(pHelper->files.headF.fd);
|
||||
pHelper->files.headF.fd = -1;
|
||||
SFile *pFile = NULL;
|
||||
|
||||
#ifdef TSDB_IDX
|
||||
pFile = helperIdxF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
}
|
||||
if (pHelper->files.dataF.fd > 0) {
|
||||
#endif
|
||||
|
||||
pFile = helperHeadF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
}
|
||||
|
||||
pFile = helperDataF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
||||
tsdbUpdateFileHeader(&(pHelper->files.dataF), 0);
|
||||
fsync(pHelper->files.dataF.fd);
|
||||
tsdbUpdateFileHeader(pFile, 0);
|
||||
fsync(pFile->fd);
|
||||
}
|
||||
close(pHelper->files.dataF.fd);
|
||||
pHelper->files.dataF.fd = -1;
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
}
|
||||
if (pHelper->files.lastF.fd > 0) {
|
||||
|
||||
pFile = helperLastF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER && !TSDB_NLAST_FILE_OPENED(pHelper)) {
|
||||
fsync(pFile->fd);
|
||||
}
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
}
|
||||
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
||||
fsync(pHelper->files.lastF.fd);
|
||||
}
|
||||
close(pHelper->files.lastF.fd);
|
||||
pHelper->files.lastF.fd = -1;
|
||||
}
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
|
||||
if (pHelper->files.nHeadF.fd > 0) {
|
||||
if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nHeadF), 0);
|
||||
fsync(pHelper->files.nHeadF.fd);
|
||||
close(pHelper->files.nHeadF.fd);
|
||||
pHelper->files.nHeadF.fd = -1;
|
||||
#ifdef TSDB_IDX
|
||||
pFile = helperNewIdxF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
if (!hasError) tsdbUpdateFileHeader(pFile, 0);
|
||||
fsync(pFile->fd);
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
if (hasError) {
|
||||
(void)remove(pHelper->files.nHeadF.fname);
|
||||
(void)remove(pFile->fname);
|
||||
} else {
|
||||
if (rename(pHelper->files.nHeadF.fname, pHelper->files.headF.fname) < 0) {
|
||||
tsdbError("failed to rename file from %s to %s since %s", pHelper->files.nHeadF.fname,
|
||||
pHelper->files.headF.fname, strerror(errno));
|
||||
if (rename(pFile->fname, helperIdxF(pHelper)->fname) < 0) {
|
||||
tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperIdxF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
pHelper->files.headF.info = pHelper->files.nHeadF.info;
|
||||
helperIdxF(pHelper)->info = pFile->info;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
pFile = helperNewHeadF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
if (!hasError) tsdbUpdateFileHeader(pFile, 0);
|
||||
fsync(pFile->fd);
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
if (hasError) {
|
||||
(void)remove(pFile->fname);
|
||||
} else {
|
||||
if (rename(pFile->fname, helperHeadF(pHelper)->fname) < 0) {
|
||||
tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperHeadF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
helperHeadF(pHelper)->info = pFile->info;
|
||||
}
|
||||
}
|
||||
|
||||
if (pHelper->files.nLastF.fd > 0) {
|
||||
if (!hasError) tsdbUpdateFileHeader(&(pHelper->files.nLastF), 0);
|
||||
fsync(pHelper->files.nLastF.fd);
|
||||
close(pHelper->files.nLastF.fd);
|
||||
pHelper->files.nLastF.fd = -1;
|
||||
pFile = helperNewLastF(pHelper);
|
||||
if (pFile->fd > 0) {
|
||||
if (!hasError) tsdbUpdateFileHeader(pFile, 0);
|
||||
fsync(pFile->fd);
|
||||
close(pFile->fd);
|
||||
pFile->fd = -1;
|
||||
if (hasError) {
|
||||
(void)remove(pHelper->files.nLastF.fname);
|
||||
(void)remove(pFile->fname);
|
||||
} else {
|
||||
if (rename(pHelper->files.nLastF.fname, pHelper->files.lastF.fname) < 0) {
|
||||
tsdbError("failed to rename file from %s to %s since %s", pHelper->files.nLastF.fname,
|
||||
pHelper->files.lastF.fname, strerror(errno));
|
||||
if (rename(pFile->fname, helperLastF(pHelper)->fname) < 0) {
|
||||
tsdbError("failed to rename file from %s to %s since %s", pFile->fname, helperLastF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
pHelper->files.lastF.info = pHelper->files.nLastF.info;
|
||||
helperLastF(pHelper)->info = helperNewLastF(pHelper)->info;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -224,18 +272,39 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
|
|||
pHelper->tableInfo.tid = pTable->tableId.tid;
|
||||
pHelper->tableInfo.uid = pTable->tableId.uid;
|
||||
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
|
||||
pHelper->tableInfo.sversion = schemaVersion(pSchema);
|
||||
|
||||
tdInitDataCols(pHelper->pDataCols[0], pSchema);
|
||||
tdInitDataCols(pHelper->pDataCols[1], pSchema);
|
||||
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pTable->tableId.tid;
|
||||
if (pIdx->offset > 0) {
|
||||
if (pIdx->uid != TABLE_UID(pTable)) {
|
||||
memset((void *)pIdx, 0, sizeof(SCompIdx));
|
||||
} else {
|
||||
if (pIdx->hasLast) pHelper->hasOldLastBlock = true;
|
||||
if (pHelper->idxH.numOfIdx > 0) {
|
||||
while (true) {
|
||||
if (pHelper->idxH.curIdx >= pHelper->idxH.numOfIdx) {
|
||||
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
||||
break;
|
||||
}
|
||||
|
||||
SCompIdx *pIdx = &(pHelper->idxH.pIdxArray[pHelper->idxH.curIdx]);
|
||||
if (pIdx->tid == TABLE_TID(pTable)) {
|
||||
if (pIdx->uid == TABLE_UID(pTable)) {
|
||||
pHelper->curCompIdx = *pIdx;
|
||||
} else {
|
||||
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
||||
}
|
||||
pHelper->idxH.curIdx++;
|
||||
break;
|
||||
} else if (pIdx->tid > TABLE_TID(pTable)) {
|
||||
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
||||
break;
|
||||
} else {
|
||||
pHelper->idxH.curIdx++;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
|
||||
}
|
||||
|
||||
if (helperType(pHelper) == TSDB_WRITE_HELPER && pHelper->curCompIdx.hasLast) {
|
||||
pHelper->hasOldLastBlock = true;
|
||||
}
|
||||
|
||||
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
|
||||
|
@ -245,7 +314,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
|
|||
int tsdbCommitTableData(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) {
|
||||
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
|
||||
|
||||
SCompIdx * pIdx = &(pHelper->pCompIdx[TABLE_TID(pCommitIter->pTable)]);
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
int blkIdx = 0;
|
||||
|
||||
ASSERT(pIdx->offset == 0 || pIdx->uid == TABLE_UID(pCommitIter->pTable));
|
||||
|
@ -271,44 +340,53 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
|
|||
STsdbCfg *pCfg = &pHelper->pRepo->config;
|
||||
|
||||
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
|
||||
SCompIdx * pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
SCompIdx * pIdx = &(pHelper->curCompIdx);
|
||||
SCompBlock compBlock = {0};
|
||||
if (TSDB_NLAST_FILE_OPENED(pHelper) && (pHelper->hasOldLastBlock)) {
|
||||
if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1;
|
||||
|
||||
SCompBlock *pCompBlock = blockAtIdx(pHelper, pIdx->numOfBlocks - 1);
|
||||
ASSERT(pCompBlock->last);
|
||||
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
|
||||
ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
|
||||
pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock);
|
||||
if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0)
|
||||
return -1;
|
||||
|
||||
if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1;
|
||||
|
||||
#if 0
|
||||
if (pCompBlock->numOfSubBlocks > 1) {
|
||||
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
|
||||
ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
|
||||
pHelper->pDataCols[0]->numOfRows < pCfg->minRowsPerFileBlock);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.nLastF), pHelper->pDataCols[0], &compBlock, true, true) < 0)
|
||||
if (tsdbWriteBlockToFile(pHelper, helperNewLastF(pHelper), pHelper->pDataCols[0], &compBlock, true, true) < 0)
|
||||
return -1;
|
||||
|
||||
if (tsdbUpdateSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1) < 0) return -1;
|
||||
} else {
|
||||
if (lseek(pHelper->files.lastF.fd, pCompBlock->offset, SEEK_SET) < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.lastF.fname,
|
||||
if (lseek(helperLastF(pHelper)->fd, pCompBlock->offset, SEEK_SET) < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperLastF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
pCompBlock->offset = lseek(pHelper->files.nLastF.fd, 0, SEEK_END);
|
||||
pCompBlock->offset = lseek(helperNewLastF(pHelper)->fd, 0, SEEK_END);
|
||||
if (pCompBlock->offset < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nLastF.fname,
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperNewLastF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tsendfile(pHelper->files.nLastF.fd, pHelper->files.lastF.fd, NULL, pCompBlock->len) < pCompBlock->len) {
|
||||
if (tsendfile(helperNewLastF(pHelper)->fd, helperLastF(pHelper)->fd, NULL, pCompBlock->len) < pCompBlock->len) {
|
||||
tsdbError("vgId:%d failed to sendfile from file %s to file %s since %s", REPO_ID(pHelper->pRepo),
|
||||
pHelper->files.lastF.fname, pHelper->files.nLastF.fname, strerror(errno));
|
||||
helperLastF(pHelper)->fname, helperNewLastF(pHelper)->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
pHelper->hasOldLastBlock = false;
|
||||
}
|
||||
|
@ -317,164 +395,174 @@ int tsdbMoveLastBlockIfNeccessary(SRWHelper *pHelper) {
|
|||
}
|
||||
|
||||
int tsdbWriteCompInfo(SRWHelper *pHelper) {
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
off_t offset = 0;
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
|
||||
if (pIdx->offset > 0) {
|
||||
offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
|
||||
if (offset < 0) {
|
||||
tsdbError("vgId:%d failed to lseed file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
SFile * pFile = helperNewHeadF(pHelper);
|
||||
|
||||
pIdx->offset = offset;
|
||||
ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
|
||||
|
||||
if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, pIdx->len) < pIdx->len) {
|
||||
tsdbError("vgId:%d failed to send %d bytes from file %s to %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
|
||||
pHelper->files.headF.fname, pHelper->files.nHeadF.fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (pIdx->len > 0) {
|
||||
if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
|
||||
if (tsdbLoadCompInfo(pHelper, NULL) < 0) return -1;
|
||||
} else {
|
||||
pHelper->pCompInfo->delimiter = TSDB_FILE_DELIMITER;
|
||||
pHelper->pCompInfo->uid = pHelper->tableInfo.uid;
|
||||
pHelper->pCompInfo->checksum = 0;
|
||||
pHelper->pCompInfo->tid = pHelper->tableInfo.tid;
|
||||
ASSERT(pIdx->len > sizeof(SCompInfo) + sizeof(TSCKSUM) &&
|
||||
(pIdx->len - sizeof(SCompInfo) - sizeof(TSCKSUM)) % sizeof(SCompBlock) == 0);
|
||||
taosCalcChecksumAppend(0, (uint8_t *)pHelper->pCompInfo, pIdx->len);
|
||||
offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
|
||||
}
|
||||
|
||||
pFile->info.magic = taosCalcChecksum(
|
||||
pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pCompInfo, pIdx->len - sizeof(TSCKSUM)), sizeof(TSCKSUM));
|
||||
offset = lseek(pFile->fd, 0, SEEK_END);
|
||||
if (offset < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
|
||||
strerror(errno));
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
pIdx->offset = offset;
|
||||
pIdx->uid = pHelper->tableInfo.uid;
|
||||
pIdx->tid = pHelper->tableInfo.tid;
|
||||
ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
|
||||
|
||||
if (twrite(pHelper->files.nHeadF.fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
|
||||
if (twrite(pFile->fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
|
||||
tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
|
||||
pHelper->files.nHeadF.fname, strerror(errno));
|
||||
pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifdef TSDB_IDX
|
||||
pFile = helperNewIdxF(pHelper);
|
||||
#endif
|
||||
|
||||
if (tsizeof(pHelper->pWIdx) < pFile->info.len + sizeof(SCompIdx) + 12) {
|
||||
pHelper->pWIdx = trealloc(pHelper->pWIdx, tsizeof(pHelper->pWIdx) == 0 ? 1024 : tsizeof(pHelper->pWIdx) * 2);
|
||||
if (pHelper->pWIdx == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
void *pBuf = POINTER_SHIFT(pHelper->pWIdx, pFile->info.len);
|
||||
pFile->info.len += tsdbEncodeSCompIdx(&pBuf, &(pHelper->curCompIdx));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tsdbWriteCompIdx(SRWHelper *pHelper) {
|
||||
STsdbCfg *pCfg = &pHelper->pRepo->config;
|
||||
|
||||
ASSERT(helperType(pHelper) == TSDB_WRITE_HELPER);
|
||||
off_t offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
|
||||
if (offset < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s to end since %s", REPO_ID(pHelper->pRepo), pHelper->files.nHeadF.fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
off_t offset = 0;
|
||||
|
||||
SFile *pFile = &(pHelper->files.nHeadF);
|
||||
pFile->info.offset = offset;
|
||||
#ifdef TSDB_IDX
|
||||
SFile *pFile = helperNewIdxF(pHelper);
|
||||
#else
|
||||
SFile *pFile = helperNewHeadF(pHelper);
|
||||
#endif
|
||||
|
||||
void *buf = pHelper->pBuffer;
|
||||
for (uint32_t i = 0; i < pCfg->maxTables; i++) {
|
||||
SCompIdx *pCompIdx = pHelper->pCompIdx + i;
|
||||
if (pCompIdx->offset > 0) {
|
||||
int drift = POINTER_DISTANCE(buf, pHelper->pBuffer);
|
||||
if (tsizeof(pHelper->pBuffer) - drift < 128) {
|
||||
pHelper->pBuffer = trealloc(pHelper->pBuffer, tsizeof(pHelper->pBuffer) * 2);
|
||||
if (pHelper->pBuffer == NULL) {
|
||||
pFile->info.len += sizeof(TSCKSUM);
|
||||
if (tsizeof(pHelper->pWIdx) < pFile->info.len) {
|
||||
pHelper->pWIdx = trealloc(pHelper->pWIdx, pFile->info.len);
|
||||
if (pHelper->pWIdx == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
buf = POINTER_SHIFT(pHelper->pBuffer, drift);
|
||||
taosEncodeVariantU32(&buf, i);
|
||||
tsdbEncodeSCompIdx(&buf, pCompIdx);
|
||||
}
|
||||
}
|
||||
taosCalcChecksumAppend(0, (uint8_t *)pHelper->pWIdx, pFile->info.len);
|
||||
pFile->info.magic = taosCalcChecksum(
|
||||
pFile->info.magic, (uint8_t *)POINTER_SHIFT(pHelper->pWIdx, pFile->info.len - sizeof(TSCKSUM)), sizeof(TSCKSUM));
|
||||
|
||||
int tsize = (char *)buf - (char *)pHelper->pBuffer + sizeof(TSCKSUM);
|
||||
taosCalcChecksumAppend(0, (uint8_t *)pHelper->pBuffer, tsize);
|
||||
|
||||
if (twrite(pHelper->files.nHeadF.fd, (void *)pHelper->pBuffer, tsize) < tsize) {
|
||||
tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), tsize,
|
||||
pHelper->files.nHeadF.fname, strerror(errno));
|
||||
offset = lseek(pFile->fd, 0, SEEK_END);
|
||||
if (offset < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
pFile->info.len = tsize;
|
||||
|
||||
pFile->info.offset = offset;
|
||||
|
||||
if (twrite(pFile->fd, (void *)pHelper->pWIdx, pFile->info.len) < pFile->info.len) {
|
||||
tsdbError("vgId:%d failed to write %d bytes to file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len,
|
||||
pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
|
||||
STsdbCfg *pCfg = &(pHelper->pRepo->config);
|
||||
|
||||
ASSERT(pHelper->state == TSDB_HELPER_FILE_SET_AND_OPEN);
|
||||
#ifdef TSDB_IDX
|
||||
SFile *pFile = helperIdxF(pHelper);
|
||||
#else
|
||||
SFile *pFile = helperHeadF(pHelper);
|
||||
#endif
|
||||
int fd = pFile->fd;
|
||||
|
||||
if (!helperHasState(pHelper, TSDB_HELPER_IDX_LOAD)) {
|
||||
// If not load from file, just load it in object
|
||||
SFile *pFile = &(pHelper->files.headF);
|
||||
int fd = pFile->fd;
|
||||
|
||||
memset(pHelper->pCompIdx, 0, tsizeof(pHelper->pCompIdx));
|
||||
if (pFile->info.offset > 0) {
|
||||
ASSERT(pFile->info.offset > TSDB_FILE_HEAD_SIZE);
|
||||
|
||||
if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s to %u since %s", REPO_ID(pHelper->pRepo), pFile->fname,
|
||||
pFile->info.offset, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
if (pFile->info.len > 0) {
|
||||
if ((pHelper->pBuffer = trealloc(pHelper->pBuffer, pFile->info.len)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (lseek(fd, pFile->info.offset, SEEK_SET) < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tread(fd, (void *)(pHelper->pBuffer), pFile->info.len) < pFile->info.len) {
|
||||
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pFile->info.len,
|
||||
pFile->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!taosCheckChecksumWhole((uint8_t *)(pHelper->pBuffer), pFile->info.len)) {
|
||||
tsdbError("vgId:%d file %s SCompIdx part is corrupted. offset %u len %u", REPO_ID(pHelper->pRepo), pFile->fname,
|
||||
pFile->info.offset, pFile->info.len);
|
||||
tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
|
||||
pFile->info.len);
|
||||
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Decode it
|
||||
pHelper->idxH.numOfIdx = 0;
|
||||
void *ptr = pHelper->pBuffer;
|
||||
while (POINTER_DISTANCE(ptr, pHelper->pBuffer) < (pFile->info.len - sizeof(TSCKSUM))) {
|
||||
uint32_t tid = 0;
|
||||
if ((ptr = taosDecodeVariantU32(ptr, &tid)) == NULL) return -1;
|
||||
ASSERT(tid > 0 && tid < pCfg->maxTables);
|
||||
size_t tlen = tsizeof(pHelper->idxH.pIdxArray);
|
||||
pHelper->idxH.numOfIdx++;
|
||||
|
||||
if ((ptr = tsdbDecodeSCompIdx(ptr, pHelper->pCompIdx + tid)) == NULL) return -1;
|
||||
|
||||
ASSERT(POINTER_DISTANCE(ptr, pHelper->pBuffer) <= pFile->info.len - sizeof(TSCKSUM));
|
||||
if (tlen < pHelper->idxH.numOfIdx * sizeof(SCompIdx)) {
|
||||
pHelper->idxH.pIdxArray = (SCompIdx *)trealloc(pHelper->idxH.pIdxArray, (tlen == 0) ? 1024 : tlen * 2);
|
||||
if (pHelper->idxH.pIdxArray == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
ptr = tsdbDecodeSCompIdx(ptr, &(pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1]));
|
||||
if (ptr == NULL) {
|
||||
tsdbError("vgId:%d file %s SCompIdx part is corrupted. len %u", REPO_ID(pHelper->pRepo), pFile->fname,
|
||||
pFile->info.len);
|
||||
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pHelper->idxH.numOfIdx == 1 || pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 1].tid >
|
||||
pHelper->idxH.pIdxArray[pHelper->idxH.numOfIdx - 2].tid);
|
||||
|
||||
ASSERT(POINTER_DISTANCE(ptr, pHelper->pBuffer) <= pFile->info.len - sizeof(TSCKSUM));
|
||||
}
|
||||
}
|
||||
}
|
||||
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
|
||||
|
||||
// Copy the memory for outside usage
|
||||
if (target) memcpy(target, pHelper->pCompIdx, tsizeof(pHelper->pCompIdx));
|
||||
if (target && pHelper->idxH.numOfIdx > 0)
|
||||
memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -482,15 +570,15 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
|
|||
int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
|
||||
ASSERT(helperHasState(pHelper, TSDB_HELPER_TABLE_SET));
|
||||
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
|
||||
int fd = pHelper->files.headF.fd;
|
||||
int fd = helperHeadF(pHelper)->fd;
|
||||
|
||||
if (!helperHasState(pHelper, TSDB_HELPER_INFO_LOAD)) {
|
||||
if (pIdx->offset > 0) {
|
||||
ASSERT(pIdx->uid == pHelper->tableInfo.uid);
|
||||
if (lseek(fd, pIdx->offset, SEEK_SET) < 0) {
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pHelper->files.headF.fname,
|
||||
tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), helperHeadF(pHelper)->fname,
|
||||
strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
|
@ -499,18 +587,18 @@ int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {
|
|||
pHelper->pCompInfo = trealloc((void *)pHelper->pCompInfo, pIdx->len);
|
||||
if (tread(fd, (void *)(pHelper->pCompInfo), pIdx->len) < pIdx->len) {
|
||||
tsdbError("vgId:%d failed to read %d bytes from file %s since %s", REPO_ID(pHelper->pRepo), pIdx->len,
|
||||
pHelper->files.headF.fname, strerror(errno));
|
||||
helperHeadF(pHelper)->fname, strerror(errno));
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
return -1;
|
||||
}
|
||||
if (!taosCheckChecksumWhole((uint8_t *)pHelper->pCompInfo, pIdx->len)) {
|
||||
tsdbError("vgId:%d file %s SCompInfo part is corrupted, tid %d uid %" PRIu64, REPO_ID(pHelper->pRepo),
|
||||
pHelper->files.headF.fname, pHelper->tableInfo.tid, pHelper->tableInfo.uid);
|
||||
helperHeadF(pHelper)->fname, pHelper->tableInfo.tid, pHelper->tableInfo.uid);
|
||||
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pIdx->uid == pHelper->pCompInfo->uid);
|
||||
ASSERT(pIdx->uid == pHelper->pCompInfo->uid && pIdx->tid == pHelper->pCompInfo->tid);
|
||||
}
|
||||
|
||||
helperSetState(pHelper, TSDB_HELPER_INFO_LOAD);
|
||||
|
@ -523,7 +611,7 @@ int tsdbLoadCompInfo(SRWHelper *pHelper, void *target) {

int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target) {
  ASSERT(pCompBlock->numOfSubBlocks <= 1);
  SFile *pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
  SFile *pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);

  if (lseek(pFile->fd, pCompBlock->offset, SEEK_SET) < 0) {
    tsdbError("vgId:%d failed to lseek file %s since %s", REPO_ID(pHelper->pRepo), pFile->fname, strerror(errno));
@ -642,9 +730,9 @@ _err:

// ---------------------- INTERNAL FUNCTIONS ----------------------
static bool tsdbShouldCreateNewLast(SRWHelper *pHelper) {
  ASSERT(pHelper->files.lastF.fd > 0);
  ASSERT(helperLastF(pHelper)->fd > 0);
  struct stat st;
  if (fstat(pHelper->files.lastF.fd, &st) < 0) return true;
  if (fstat(helperLastF(pHelper)->fd, &st) < 0) return true;
  if (st.st_size > 32 * 1024 + TSDB_FILE_HEAD_SIZE) return true;
  return false;
}
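Throughout this file the patch replaces direct field access (pHelper->files.headF, .dataF, .lastF) with helperHeadF()/helperDataF()/helperLastF() accessors, presumably thin macros over the SFileGroup copy stored in pHelper->files.fGroup by tsdbSetAndOpenHelperFile(). Their exact definitions are not in this diff; a hypothetical shape, for orientation only:

// Assumed accessor shape, not taken from the patch.
#define helperHeadF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_HEAD]))
#define helperDataF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_DATA]))
#define helperLastF(h) (&((h)->files.fGroup.files[TSDB_FILE_TYPE_LAST]))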
@ -729,6 +817,8 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
|
|||
ASSERT(flen > 0);
|
||||
flen += sizeof(TSCKSUM);
|
||||
taosCalcChecksumAppend(0, (uint8_t *)tptr, flen);
|
||||
pFile->info.magic =
|
||||
taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(tptr, flen - sizeof(TSCKSUM)), sizeof(TSCKSUM));
|
||||
|
||||
if (ncol != 0) {
|
||||
pCompCol->offset = toffset;
|
||||
|
@ -747,6 +837,8 @@ static int tsdbWriteBlockToFile(SRWHelper *pHelper, SFile *pFile, SDataCols *pDa
|
|||
pCompData->numOfCols = nColsNotAllNull;
|
||||
|
||||
taosCalcChecksumAppend(0, (uint8_t *)pCompData, tsize);
|
||||
pFile->info.magic = taosCalcChecksum(pFile->info.magic, (uint8_t *)POINTER_SHIFT(pCompData, tsize - sizeof(TSCKSUM)),
|
||||
sizeof(TSCKSUM));
|
||||
|
||||
// Write the whole block to file
|
||||
if (twrite(pFile->fd, (void *)pCompData, lsize) < lsize) {
|
||||
|
@ -804,7 +896,7 @@ static int tsdbAdjustInfoSizeIfNeeded(SRWHelper *pHelper, size_t esize) {
|
|||
}
|
||||
|
||||
static int tsdbInsertSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) {
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
|
||||
ASSERT(blkIdx >= 0 && blkIdx <= pIdx->numOfBlocks);
|
||||
ASSERT(pCompBlock->numOfSubBlocks == 1);
|
||||
|
@ -851,7 +943,7 @@ _err:
|
|||
static int tsdbAddSubBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx, int rowsAdded) {
|
||||
ASSERT(pCompBlock->numOfSubBlocks == 0);
|
||||
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
ASSERT(blkIdx >= 0 && blkIdx < pIdx->numOfBlocks);
|
||||
|
||||
SCompBlock *pSCompBlock = pHelper->pCompInfo->blocks + blkIdx;
|
||||
|
@ -935,7 +1027,7 @@ _err:
|
|||
static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int blkIdx) {
|
||||
ASSERT(pCompBlock->numOfSubBlocks == 1);
|
||||
|
||||
SCompIdx *pIdx = pHelper->pCompIdx + pHelper->tableInfo.tid;
|
||||
SCompIdx *pIdx = &(pHelper->curCompIdx);
|
||||
|
||||
ASSERT(blkIdx >= 0 && blkIdx < pIdx->numOfBlocks);
|
||||
|
||||
|
@ -971,24 +1063,21 @@ static int tsdbUpdateSuperBlock(SRWHelper *pHelper, SCompBlock *pCompBlock, int
|
|||
}
|
||||
|
||||
static void tsdbResetHelperFileImpl(SRWHelper *pHelper) {
|
||||
pHelper->idxH.numOfIdx = 0;
|
||||
pHelper->idxH.curIdx = 0;
|
||||
memset((void *)&pHelper->files, 0, sizeof(pHelper->files));
|
||||
pHelper->files.fid = -1;
|
||||
pHelper->files.headF.fd = -1;
|
||||
pHelper->files.dataF.fd = -1;
|
||||
pHelper->files.lastF.fd = -1;
|
||||
pHelper->files.nHeadF.fd = -1;
|
||||
pHelper->files.nLastF.fd = -1;
|
||||
helperHeadF(pHelper)->fd = -1;
|
||||
helperDataF(pHelper)->fd = -1;
|
||||
helperLastF(pHelper)->fd = -1;
|
||||
helperNewHeadF(pHelper)->fd = -1;
|
||||
helperNewLastF(pHelper)->fd = -1;
|
||||
#ifdef TSDB_IDX
|
||||
helperIdxF(pHelper)->fd = -1;
|
||||
helperNewIdxF(pHelper)->fd = -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int tsdbInitHelperFile(SRWHelper *pHelper) {
|
||||
STsdbCfg *pCfg = &pHelper->pRepo->config;
|
||||
size_t tsize = sizeof(SCompIdx) * pCfg->maxTables + sizeof(TSCKSUM);
|
||||
pHelper->pCompIdx = (SCompIdx *)tmalloc(tsize);
|
||||
if (pHelper->pCompIdx == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tsdbResetHelperFileImpl(pHelper);
|
||||
return 0;
|
||||
}
|
||||
|
@ -996,7 +1085,8 @@ static int tsdbInitHelperFile(SRWHelper *pHelper) {
|
|||
static void tsdbDestroyHelperFile(SRWHelper *pHelper) {
|
||||
tsdbCloseHelperFile(pHelper, false);
|
||||
tsdbResetHelperFileImpl(pHelper);
|
||||
tzfree(pHelper->pCompIdx);
|
||||
tzfree(pHelper->idxH.pIdxArray);
|
||||
tzfree(pHelper->pWIdx);
|
||||
}
|
||||
|
||||
// ---------- Operations on Helper Table part
|
||||
|
@ -1142,8 +1232,8 @@ static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBl
|
|||
if (tsdbCheckAndDecodeColumnData(pDataCol, pHelper->pBuffer, pCompCol->len, pCompBlock->algorithm,
|
||||
pCompBlock->numOfRows, pHelper->pRepo->config.maxRowsPerFileBlock,
|
||||
pHelper->compBuffer, tsizeof(pHelper->compBuffer)) < 0) {
|
||||
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname, pCompCol->colId,
|
||||
(int64_t)pCompCol->offset);
|
||||
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname,
|
||||
pCompCol->colId, offset);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1154,7 +1244,7 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
|
|||
ASSERT(pCompBlock->numOfSubBlocks <= 1);
|
||||
ASSERT(colIds[0] == 0);
|
||||
|
||||
SFile * pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
|
||||
SFile * pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);
|
||||
SCompCol compCol = {0};
|
||||
|
||||
// If only load timestamp column, no need to load SCompData part
|
||||
|
@ -1170,13 +1260,21 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
|
|||
SCompCol *pCompCol = NULL;
|
||||
|
||||
while (true) {
|
||||
ASSERT(dcol < pDataCols->numOfCols);
|
||||
if (dcol >= pDataCols->numOfCols) {
|
||||
pDataCol = NULL;
|
||||
break;
|
||||
}
|
||||
pDataCol = &pDataCols->cols[dcol];
|
||||
ASSERT(pDataCol->colId <= colId);
|
||||
if (pDataCol->colId == colId) break;
|
||||
if (pDataCol->colId > colId) {
|
||||
pDataCol = NULL;
|
||||
break;
|
||||
} else {
|
||||
dcol++;
|
||||
if (pDataCol->colId == colId) break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pDataCol == NULL) continue;
|
||||
ASSERT(pDataCol->colId == colId);
|
||||
|
||||
if (colId == 0) { // load the key row
|
||||
|
@ -1186,15 +1284,24 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
|
|||
compCol.offset = TSDB_KEY_COL_OFFSET;
|
||||
pCompCol = &compCol;
|
||||
} else { // load non-key rows
|
||||
while (ccol < pCompBlock->numOfCols) {
|
||||
pCompCol = &pHelper->pCompData->cols[ccol];
|
||||
if (pCompCol->colId >= colId) break;
|
||||
ccol++;
|
||||
while (true) {
|
||||
if (ccol >= pCompBlock->numOfCols) {
|
||||
pCompCol = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (ccol >= pCompBlock->numOfCols || pCompCol->colId > colId) {
|
||||
pCompCol = &(pHelper->pCompData->cols[ccol]);
|
||||
if (pCompCol->colId > colId) {
|
||||
pCompCol = NULL;
|
||||
break;
|
||||
} else {
|
||||
ccol++;
|
||||
if (pCompCol->colId == colId) break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pCompCol == NULL) {
|
||||
dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
|
||||
dcol++;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1202,8 +1309,6 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
|
|||
}
|
||||
|
||||
if (tsdbLoadColData(pHelper, pFile, pCompBlock, pCompCol, pDataCol) < 0) goto _err;
|
||||
dcol++;
|
||||
if (colId != 0) ccol++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1215,7 +1320,7 @@ _err:
|
|||
static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *pDataCols) {
|
||||
ASSERT(pCompBlock->numOfSubBlocks <= 1);
|
||||
|
||||
SFile *pFile = (pCompBlock->last) ? &(pHelper->files.lastF) : &(pHelper->files.dataF);
|
||||
SFile *pFile = (pCompBlock->last) ? helperLastF(pHelper) : helperDataF(pHelper);
|
||||
|
||||
pHelper->pBuffer = trealloc(pHelper->pBuffer, pCompBlock->len);
|
||||
if (pHelper->pBuffer == NULL) {
|
||||
|
@ -1314,6 +1419,7 @@ _err:
static int tsdbEncodeSCompIdx(void **buf, SCompIdx *pIdx) {
  int tlen = 0;

  tlen += taosEncodeVariantI32(buf, pIdx->tid);
  tlen += taosEncodeVariantU32(buf, pIdx->len);
  tlen += taosEncodeVariantU32(buf, pIdx->offset);
  tlen += taosEncodeFixedU8(buf, pIdx->hasLast);
@ -1329,6 +1435,7 @@ static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) {
  uint32_t numOfBlocks = 0;
  uint64_t value = 0;

  if ((buf = taosDecodeVariantI32(buf, &(pIdx->tid))) == NULL) return NULL;
  if ((buf = taosDecodeVariantU32(buf, &(pIdx->len))) == NULL) return NULL;
  if ((buf = taosDecodeVariantU32(buf, &(pIdx->offset))) == NULL) return NULL;
  if ((buf = taosDecodeFixedU8(buf, &(hasLast))) == NULL) return NULL;
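Each SCompIdx entry now carries its own tid as the first, variant-encoded field, so the index section can be decoded as a stream of self-describing entries. A rough round-trip sketch of the two functions above follows; the fixed 128-byte buffer and the helper name are assumptions for illustration only.

static int compIdxRoundTrip(SCompIdx *pIn, SCompIdx *pOut) {
  char  buffer[128] = {0};   // assumed large enough for one variant-encoded entry
  void *pBuf = buffer;

  int tlen = tsdbEncodeSCompIdx(&pBuf, pIn);                // advances pBuf, returns bytes written
  if (tlen <= 0 || tlen > (int)sizeof(buffer)) return -1;

  if (tsdbDecodeSCompIdx(buffer, pOut) == NULL) return -1;  // NULL signals a malformed buffer
  return 0;
}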
@ -1346,7 +1453,7 @@ static void *tsdbDecodeSCompIdx(void *buf, SCompIdx *pIdx) {
|
|||
static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter, SDataCols *pDataCols, TSKEY maxKey) {
|
||||
STsdbCfg * pCfg = &(pHelper->pRepo->config);
|
||||
STable * pTable = pCommitIter->pTable;
|
||||
SCompIdx * pIdx = pHelper->pCompIdx + TABLE_TID(pTable);
|
||||
SCompIdx * pIdx = &(pHelper->curCompIdx);
|
||||
TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter);
|
||||
int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5;
|
||||
SCompBlock compBlock = {0};
|
||||
|
@ -1362,7 +1469,7 @@ static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
ASSERT(rowsRead > 0 && rowsRead == pDataCols->numOfRows);
|
||||
if (rowsRead + pCompBlock->numOfRows < pCfg->minRowsPerFileBlock &&
|
||||
pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, &compBlock, true, false) < 0) return -1;
|
||||
if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1;
|
||||
if (tsdbAddSubBlock(pHelper, &compBlock, pIdx->numOfBlocks - 1, rowsRead) < 0) return -1;
|
||||
} else {
|
||||
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
|
||||
|
@ -1386,6 +1493,11 @@ static int tsdbProcessAppendCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
if (tsdbInsertSuperBlock(pHelper, &compBlock, pIdx->numOfBlocks) < 0) return -1;
|
||||
}
|
||||
|
||||
#ifndef NDEBUG
|
||||
TSKEY keyNext = tsdbNextIterKey(pCommitIter->pIter);
|
||||
ASSERT(keyNext < 0 || keyNext > pIdx->maxKey);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1393,7 +1505,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
int *blkIdx) {
|
||||
STsdbCfg * pCfg = &(pHelper->pRepo->config);
|
||||
STable * pTable = pCommitIter->pTable;
|
||||
SCompIdx * pIdx = pHelper->pCompIdx + TABLE_TID(pTable);
|
||||
SCompIdx * pIdx = &(pHelper->curCompIdx);
|
||||
SCompBlock compBlock = {0};
|
||||
TSKEY keyFirst = tsdbNextIterKey(pCommitIter->pIter);
|
||||
int defaultRowsInBlock = pCfg->maxRowsPerFileBlock * 4 / 5;
|
||||
|
@ -1421,13 +1533,13 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
if (rows2 == 0) { // all data filtered out
|
||||
*(pCommitIter->pIter) = slIter;
|
||||
} else {
|
||||
if (rows1 + rows2 < pCfg->minRowsPerFileBlock && pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS &&
|
||||
!TSDB_NLAST_FILE_OPENED(pHelper)) {
|
||||
if (pCompBlock->numOfRows + rows2 < pCfg->minRowsPerFileBlock &&
|
||||
pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
|
||||
tdResetDataCols(pDataCols);
|
||||
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols,
|
||||
pDataCols0->cols[0].pData, pDataCols0->numOfRows);
|
||||
ASSERT(rowsRead == rows2 && rowsRead == pDataCols->numOfRows);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.lastF), pDataCols, &compBlock, true, false) < 0) return -1;
|
||||
if (tsdbWriteBlockToFile(pHelper, helperLastF(pHelper), pDataCols, &compBlock, true, false) < 0) return -1;
|
||||
if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1;
|
||||
tblkIdx++;
|
||||
} else {
|
||||
|
@ -1466,15 +1578,16 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
if (rowsRead == 0) break;
|
||||
|
||||
ASSERT(rowsRead == pDataCols->numOfRows);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, true) < 0) return -1;
|
||||
if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0) return -1;
|
||||
if (tsdbInsertSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1;
|
||||
tblkIdx++;
|
||||
}
|
||||
ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
|
||||
tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
|
||||
} else {
|
||||
ASSERT(keyFirst <= blkKeyLast);
|
||||
int16_t colId = 0;
|
||||
if (tsdbLoadBlockDataCols(pHelper, pCompBlock, NULL, &colId, 1) < 0) return -1;
|
||||
ASSERT(pDataCols0->numOfRows == pCompBlock->numOfRows);
|
||||
|
||||
slIter = *(pCommitIter->pIter);
|
||||
int rows1 = (pCfg->maxRowsPerFileBlock - pCompBlock->numOfRows);
|
||||
|
@ -1483,9 +1596,10 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
|
||||
if (rows2 == 0) { // all filtered out
|
||||
*(pCommitIter->pIter) = slIter;
|
||||
ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
|
||||
tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
|
||||
} else {
|
||||
int rows3 = tsdbLoadDataFromCache(pTable, &slIter, keyLimit, INT_MAX, NULL, NULL, 0) + rows2;
|
||||
ASSERT(rows3 >= rows2);
|
||||
|
||||
if (pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && rows1 >= rows2) {
|
||||
int rows = (rows1 >= rows3) ? rows3 : rows2;
|
||||
|
@ -1493,10 +1607,12 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, keyLimit, rows, pDataCols,
|
||||
pDataCols0->cols[0].pData, pDataCols0->numOfRows);
|
||||
ASSERT(rowsRead == rows && rowsRead == pDataCols->numOfRows);
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, false) < 0)
|
||||
if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, false) < 0)
|
||||
return -1;
|
||||
if (tsdbAddSubBlock(pHelper, &compBlock, tblkIdx, rowsRead) < 0) return -1;
|
||||
tblkIdx++;
|
||||
ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
|
||||
tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
|
||||
} else {
|
||||
if (tsdbLoadBlockData(pHelper, pCompBlock, NULL) < 0) return -1;
|
||||
int round = 0;
|
||||
|
@ -1506,7 +1622,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
tsdbLoadAndMergeFromCache(pDataCols0, &dIter, pCommitIter, pDataCols, keyLimit, defaultRowsInBlock);
|
||||
if (rowsRead == 0) break;
|
||||
|
||||
if (tsdbWriteBlockToFile(pHelper, &(pHelper->files.dataF), pDataCols, &compBlock, false, true) < 0)
|
||||
if (tsdbWriteBlockToFile(pHelper, helperDataF(pHelper), pDataCols, &compBlock, false, true) < 0)
|
||||
return -1;
|
||||
if (round == 0) {
|
||||
if (tsdbUpdateSuperBlock(pHelper, &compBlock, tblkIdx) < 0) return -1;
|
||||
|
@ -1517,6 +1633,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
|
|||
round++;
|
||||
tblkIdx++;
|
||||
}
|
||||
ASSERT(tblkIdx == 0 || (tsdbNextIterKey(pCommitIter->pIter) < 0 ||
|
||||
tsdbNextIterKey(pCommitIter->pIter) > blockAtIdx(pHelper, tblkIdx - 1)->keyLast));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1577,10 +1695,10 @@ static int tsdbWriteBlockToProperFile(SRWHelper *pHelper, SDataCols *pDataCols,
|
|||
ASSERT(pDataCols->numOfRows > 0);
|
||||
|
||||
if (pDataCols->numOfRows >= pCfg->minRowsPerFileBlock) {
|
||||
pFile = &(pHelper->files.dataF);
|
||||
pFile = helperDataF(pHelper);
|
||||
} else {
|
||||
isLast = true;
|
||||
pFile = TSDB_NLAST_FILE_OPENED(pHelper) ? &(pHelper->files.nLastF) : &(pHelper->files.lastF);
|
||||
pFile = TSDB_NLAST_FILE_OPENED(pHelper) ? helperNewLastF(pHelper) : helperLastF(pHelper);
|
||||
}
|
||||
|
||||
ASSERT(pFile->fd > 0);
|
||||
|
|
|
@ -132,6 +132,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
|
||||
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, STimeWindow* win,
|
||||
STsdbQueryHandle* pQueryHandle);
|
||||
static int tsdbCheckInfoCompar(const void* key1, const void* key2);
|
||||
|
||||
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
|
||||
pBlockLoadInfo->slot = -1;
|
||||
|
@ -174,6 +175,9 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS

TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
  STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
  if (pQueryHandle == NULL) {
    goto out_of_memory;
  }
  pQueryHandle->order = pCond->order;
  pQueryHandle->window = pCond->twindow;
  pQueryHandle->pTsdb = tsdb;
@ -187,8 +191,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
  pQueryHandle->allocSize = 0;

  if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) {
    free(pQueryHandle);
    return NULL;
    goto out_of_memory;
  }

  tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
@ -200,18 +203,30 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
|
|||
int32_t numOfCols = pCond->numOfCols;
|
||||
|
||||
pQueryHandle->statis = calloc(numOfCols, sizeof(SDataStatis));
|
||||
if (pQueryHandle->statis == NULL) {
|
||||
goto out_of_memory;
|
||||
}
|
||||
pQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array?
|
||||
if (pQueryHandle->pColumns == NULL) {
|
||||
goto out_of_memory;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData colInfo = {{0}, 0};
|
||||
|
||||
colInfo.info = pCond->colList[i];
|
||||
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
|
||||
if (colInfo.pData == NULL) {
|
||||
goto out_of_memory;
|
||||
}
|
||||
taosArrayPush(pQueryHandle->pColumns, &colInfo);
|
||||
pQueryHandle->statis[i].colId = colInfo.info.colId;
|
||||
}
|
||||
|
||||
pQueryHandle->pTableCheckInfo = taosArrayInit(groupList->numOfTables, sizeof(STableCheckInfo));
|
||||
if (pQueryHandle->pTableCheckInfo == NULL) {
|
||||
goto out_of_memory;
|
||||
}
|
||||
STsdbMeta* pMeta = tsdbGetMeta(tsdb);
|
||||
assert(pMeta != NULL);
|
||||
|
||||
|
@ -237,6 +252,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
|
|||
}
|
||||
}
|
||||
|
||||
taosArraySort(pQueryHandle->pTableCheckInfo, tsdbCheckInfoCompar);
|
||||
pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
|
||||
|
||||
tsdbDebug("%p total numOfTable:%zu in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo);
|
||||
|
@ -245,6 +261,11 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
|
|||
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
|
||||
|
||||
return (TsdbQueryHandleT) pQueryHandle;
|
||||
|
||||
out_of_memory:
|
||||
tsdbCleanupQueryHandle(pQueryHandle);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
TsdbQueryHandleT tsdbQueryLastRow(TSDB_REPO_T *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
|
||||
|
@ -300,14 +321,17 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
|
||||
assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL);
|
||||
|
||||
if (pHandle->mem && pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
|
||||
// TODO: add uid check
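// also make sure tid is within the snapshot's table range before dereferencing tData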
|
||||
if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables &&
|
||||
pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
|
||||
pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData,
|
||||
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
|
||||
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
|
||||
}
|
||||
|
||||
if (pHandle->imem && pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
|
||||
if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables &&
|
||||
pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
|
||||
pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData,
|
||||
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
|
||||
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
|
||||
}
|
||||
|
||||
// both iterators are NULL, no data in buffer right now
|
||||
|
@ -554,7 +578,9 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
|
|||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
|
||||
SCompIdx* compIndex = &pQueryHandle->rhelper.pCompIdx[pCheckInfo->tableId.tid];
|
||||
tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
|
||||
|
||||
SCompIdx* compIndex = &pQueryHandle->rhelper.curCompIdx;
|
||||
|
||||
// no data block in this file, try next file
|
||||
if (compIndex->len == 0 || compIndex->numOfBlocks == 0 || compIndex->uid != pCheckInfo->tableId.uid) {
|
||||
|
@ -571,8 +597,6 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
|
|||
pCheckInfo->compSize = compIndex->len;
|
||||
}
|
||||
|
||||
tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
|
||||
|
||||
tsdbLoadCompInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo));
|
||||
SCompInfo* pCompInfo = pCheckInfo->pCompInfo;
|
||||
|
||||
|
@ -1511,7 +1535,6 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
|
|||
|
||||
static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables <= ((STsdbRepo*)pQueryHandle->pTsdb)->config.maxTables);
|
||||
|
||||
while (pQueryHandle->activeIndex < numOfTables) {
|
||||
if (hasMoreDataInCache(pQueryHandle)) {
|
||||
|
@ -2370,6 +2393,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (pQueryHandle->pTableCheckInfo != NULL) {
|
||||
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
|
@ -2382,23 +2406,24 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
|
|||
tfree(pTableCheckInfo->pDataCols);
|
||||
tfree(pTableCheckInfo->pCompInfo);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pQueryHandle->pTableCheckInfo);
|
||||
}
|
||||
|
||||
if (pQueryHandle->pColumns != NULL) {
|
||||
size_t cols = taosArrayGetSize(pQueryHandle->pColumns);
|
||||
for (int32_t i = 0; i < cols; ++i) {
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
|
||||
tfree(pColInfo->pData);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pQueryHandle->pColumns);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pQueryHandle->defaultLoadColumn);
|
||||
tfree(pQueryHandle->pDataBlockInfo);
|
||||
tfree(pQueryHandle->statis);
|
||||
|
||||
// todo check error
|
||||
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->mem);
|
||||
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);
|
||||
tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pQueryHandle->mem, pQueryHandle->imem);
|
||||
|
||||
tsdbDestroyHelper(&pQueryHandle->rhelper);
|
||||
|
||||
|
@ -2431,3 +2456,13 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
|
|||
taosArrayDestroy(pGroupList->pGroupList);
|
||||
}
|
||||
|
||||
static int tsdbCheckInfoCompar(const void* key1, const void* key2) {
|
||||
if (((STableCheckInfo*)key1)->tableId.tid < ((STableCheckInfo*)key2)->tableId.tid) {
|
||||
return -1;
|
||||
} else if (((STableCheckInfo*)key1)->tableId.tid > ((STableCheckInfo*)key2)->tableId.tid) {
|
||||
return 1;
|
||||
} else {
|
||||
ASSERT(false);
|
||||
return 0;
|
||||
}
|
||||
}
|
|
@ -98,7 +98,7 @@ static void tsdbSetCfg(STsdbCfg *pCfg, int32_t tsdbId, int32_t cacheBlockSize, i
|
|||
pCfg->tsdbId = tsdbId;
|
||||
pCfg->cacheBlockSize = cacheBlockSize;
|
||||
pCfg->totalBlocks = totalBlocks;
|
||||
pCfg->maxTables = maxTables;
|
||||
// pCfg->maxTables = maxTables;
|
||||
pCfg->daysPerFile = daysPerFile;
|
||||
pCfg->keep = keep;
|
||||
pCfg->minRowsPerFileBlock = minRows;
|
||||
|
|
|
@ -29,6 +29,7 @@ typedef struct {
|
|||
int64_t tombSize;
|
||||
int64_t nRecords;
|
||||
int64_t nDels;
|
||||
uint32_t magic;
|
||||
} SStoreInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -45,6 +46,8 @@ typedef struct {
|
|||
SStoreInfo info;
|
||||
} SKVStore;
|
||||
|
||||
#define KVSTORE_MAGIC(s) (s)->info.magic
|
||||
|
||||
int tdCreateKVStore(char *fname);
|
||||
int tdDestroyKVStore(char *fname);
|
||||
SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH);
|
||||
|
|
|
@ -308,11 +308,21 @@ bool taosReadGlobalCfg() {
|
|||
|
||||
sprintf(fileName, "%s/taos.cfg", configDir);
|
||||
FILE* fp = fopen(fileName, "r");
|
||||
if (fp == NULL) {
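// fall back: configDir itself may point to a config file rather than a directory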
|
||||
struct stat s;
|
||||
if (stat(configDir, &s) != 0 || (!S_ISREG(s.st_mode) && !S_ISLNK(s.st_mode))) {
|
||||
// return true to keep the behavior from before config-file support
|
||||
return true;
|
||||
}
|
||||
fp = fopen(configDir, "r");
|
||||
if (fp == NULL) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
size_t len = 1024;
|
||||
line = calloc(1, len);
|
||||
|
||||
if (fp != NULL) {
|
||||
while (!feof(fp)) {
|
||||
memset(line, 0, len);
|
||||
|
||||
|
@ -338,7 +348,6 @@ bool taosReadGlobalCfg() {
|
|||
}
|
||||
|
||||
fclose(fp);
|
||||
}
|
||||
|
||||
tfree(line);
|
||||
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#define TD_KVSTORE_MAINOR_VERSION 0
|
||||
#define TD_KVSTORE_SNAP_SUFFIX ".snap"
|
||||
#define TD_KVSTORE_NEW_SUFFIX ".new"
|
||||
#define TD_KVSTORE_INIT_MAGIC 0xFFFFFFFF
|
||||
|
||||
typedef struct {
|
||||
uint64_t uid;
|
||||
|
@ -140,6 +141,7 @@ SKVStore *tdOpenKVStore(char *fname, iterFunc iFunc, afterFunc aFunc, void *appH
|
|||
if (tdLoadKVStoreHeader(pStore->fd, pStore->fname, &info) < 0) goto _err;
|
||||
|
||||
pStore->info.size = TD_KVSTORE_HEADER_SIZE;
|
||||
pStore->info.magic = info.magic;
|
||||
|
||||
if (tdRestoreKVStore(pStore) < 0) goto _err;
|
||||
|
||||
|
@ -251,6 +253,8 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
|
|||
return -1;
|
||||
}
|
||||
|
||||
pStore->info.magic =
|
||||
taosCalcChecksum(pStore->info.magic, (uint8_t *)POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)), sizeof(TSCKSUM));
|
||||
pStore->info.size += (sizeof(SKVRecord) + contLen);
|
||||
SKVRecord *pRecord = taosHashGet(pStore->map, (void *)&uid, sizeof(uid));
|
||||
if (pRecord != NULL) { // just to insert
|
||||
|
@ -288,6 +292,7 @@ int tdDropKVStoreRecord(SKVStore *pStore, uint64_t uid) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
pStore->info.magic = taosCalcChecksum(pStore->info.magic, (uint8_t *)buf, POINTER_DISTANCE(pBuf, buf));
|
||||
pStore->info.size += POINTER_DISTANCE(pBuf, buf);
|
||||
pStore->info.nDels++;
|
||||
pStore->info.nRecords--;
|
||||
|
@ -371,7 +376,7 @@ static int tdUpdateKVStoreHeader(int fd, char *fname, SStoreInfo *pInfo) {
|
|||
}
|
||||
|
||||
static int tdInitKVStoreHeader(int fd, char *fname) {
|
||||
SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0};
|
||||
SStoreInfo info = {TD_KVSTORE_HEADER_SIZE, 0, 0, 0, TD_KVSTORE_INIT_MAGIC};
|
||||
|
||||
return tdUpdateKVStoreHeader(fd, fname, &info);
|
||||
}
|
||||
|
@ -382,6 +387,7 @@ static int tdEncodeStoreInfo(void **buf, SStoreInfo *pInfo) {
|
|||
tlen += taosEncodeVariantI64(buf, pInfo->tombSize);
|
||||
tlen += taosEncodeVariantI64(buf, pInfo->nRecords);
|
||||
tlen += taosEncodeVariantI64(buf, pInfo->nDels);
|
||||
tlen += taosEncodeFixedU32(buf, pInfo->magic);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
@ -391,6 +397,7 @@ static void *tdDecodeStoreInfo(void *buf, SStoreInfo *pInfo) {
|
|||
buf = taosDecodeVariantI64(buf, &(pInfo->tombSize));
|
||||
buf = taosDecodeVariantI64(buf, &(pInfo->nRecords));
|
||||
buf = taosDecodeVariantI64(buf, &(pInfo->nDels));
|
||||
buf = taosDecodeFixedU32(buf, &(pInfo->magic));
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
|
@ -123,7 +123,7 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
|
|||
tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
|
||||
tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
|
||||
tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
|
||||
tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
|
||||
// tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
|
||||
tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
|
||||
tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
|
||||
tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
|
||||
|
@ -630,14 +630,14 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
|
|||
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnodeCfg->cfg.cfgVersion);
|
||||
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnodeCfg->cfg.cacheBlockSize);
|
||||
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnodeCfg->cfg.totalBlocks);
|
||||
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
|
||||
// len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnodeCfg->cfg.daysToKeep1);
|
||||
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnodeCfg->cfg.daysToKeep2);
|
||||
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
|
||||
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
|
||||
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
|
||||
// len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
|
||||
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision);
|
||||
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnodeCfg->cfg.compression);
|
||||
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnodeCfg->cfg.walLevel);
|
||||
|
@ -729,12 +729,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
|
|||
}
|
||||
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
|
||||
|
||||
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
|
||||
if (!maxTables || maxTables->type != cJSON_Number) {
|
||||
vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
pVnode->tsdbCfg.maxTables = maxTables->valueint;
|
||||
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
|
||||
// if (!maxTables || maxTables->type != cJSON_Number) {
|
||||
// vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
|
||||
// goto PARSE_OVER;
|
||||
// }
|
||||
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
|
||||
|
||||
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
|
||||
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
|
||||
|
@ -778,12 +778,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
|
|||
}
|
||||
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
|
||||
|
||||
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
|
||||
if (!commitTime || commitTime->type != cJSON_Number) {
|
||||
vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
|
||||
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
|
||||
// if (!commitTime || commitTime->type != cJSON_Number) {
|
||||
// vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
|
||||
// goto PARSE_OVER;
|
||||
// }
|
||||
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
|
||||
|
||||
cJSON *precision = cJSON_GetObjectItem(root, "precision");
|
||||
if (!precision || precision->type != cJSON_Number) {
|
||||
|
|
|
@ -26,7 +26,6 @@ if __name__ == '__main__':
|
|||
|
||||
# Generate a cursor object to run SQL commands
|
||||
c1 = conn.cursor()
|
||||
|
||||
# Create a database named db
|
||||
try:
|
||||
c1.execute('create database if not exists db ')
|
||||
|
@ -50,9 +49,11 @@ if __name__ == '__main__':
|
|||
raise(err)
|
||||
|
||||
# insert data
|
||||
for i in range(10000):
|
||||
for i in range(10):
|
||||
try:
|
||||
c1.execute("insert into t values ('%s', %d, %f, '%s')" % (start_time, random.randint(1,10), random.randint(1,10)/10.0, 'hello'))
|
||||
value = c1.execute("insert into t values ('%s', %d, %f, '%s')" % (start_time, random.randint(1,10), random.randint(1,10)/10.0, 'hello'))
|
||||
# for an insert statement, execute() returns the number of affected rows
|
||||
print(value)
|
||||
except Exception as err:
|
||||
conn.close()
|
||||
raise(err)
|
||||
|
@ -70,6 +71,11 @@ if __name__ == '__main__':
|
|||
# Use fetchall to fetch data in a list
|
||||
data = c1.fetchall()
|
||||
|
||||
for col in data:
|
||||
print(col)
|
||||
|
||||
print('Another query method ')
|
||||
|
||||
try:
|
||||
c1.execute('select * from db.t')
|
||||
except Exception as err:
|
||||
|
|
|
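For context, the connector pattern the updated example relies on (execute() returning the affected-row count for an insert, and fetchall() returning the query result) can be sketched as follows. This is a minimal sketch, assuming conn is the already-open connection created earlier in the example and that the table db.t created above exists:

c1 = conn.cursor()                         # cursor for running SQL statements
affected = c1.execute("insert into db.t values (now, 1, 0.1, 'hello')")
print(affected)                            # for an insert, execute() returns the affected row count
c1.execute("select * from db.t")           # for a query, fetch the result rows afterwards
for row in c1.fetchall():
    print(row)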
@ -0,0 +1,88 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=1
|
||||
NUM_OF_FILES=100
|
||||
OUT_FILE=cassandraWrite.out
|
||||
|
||||
rowsPerRequest=(1 10 50 100 500 1000 2000)
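# batch sizes (rows per write request) swept by the benchmark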
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
function runTest {
|
||||
for c in `seq 1 $clients`; do
|
||||
avgRPR[$c]=0
|
||||
done
|
||||
|
||||
printf "R/R, "
|
||||
for c in `seq 1 $clients`; do
|
||||
if [ "$c" == "1" ]; then
|
||||
printf "$c client, "
|
||||
else
|
||||
printf "$c clients, "
|
||||
fi
|
||||
done
|
||||
printf "\n"
|
||||
|
||||
for r in ${rowsPerRequest[@]}; do
|
||||
printf "$r, "
|
||||
for c in `seq 1 $clients`; do
|
||||
totalRPR=0
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
printTo "loop i:$i, java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
|
||||
-datadir $DATA_DIR \
|
||||
-numofFiles $NUM_OF_FILES \
|
||||
-rowsperrequest $r \
|
||||
-writeclients $c \
|
||||
-conf $CAS_TEST_DIR/application.conf"
|
||||
java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
|
||||
-datadir $DATA_DIR \
|
||||
-numofFiles $NUM_OF_FILES \
|
||||
-rowsperrequest $r \
|
||||
-writeclients $c \
|
||||
-conf $CAS_TEST_DIR/application.conf \
|
||||
> $OUT_FILE 2>&1
|
||||
RPR=`cat $OUT_FILE | grep "insertation speed:" | awk '{print $(NF-1)}'`
|
||||
totalRPR=`echo "scale=4; $totalRPR + $RPR" | bc`
|
||||
printTo "rows:$r, clients:$c, i:$i RPR:$RPR"
|
||||
done
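# average the measured insert speed over all loops for this client count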
|
||||
avgRPR[$c]=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc`
|
||||
done
|
||||
for c in `seq 1 $clients`; do
|
||||
printf "${avgRPR[$c]}, "
|
||||
done
|
||||
printf "\n"
|
||||
done
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
verbose=false
|
||||
clients=1
|
||||
|
||||
while : ; do
|
||||
case $1 in
|
||||
-v)
|
||||
verbose=true
|
||||
shift ;;
|
||||
|
||||
-c)
|
||||
clients=$2
|
||||
shift 2;;
|
||||
*)
|
||||
break ;;
|
||||
esac
|
||||
done
|
||||
|
||||
printTo "Cassandra Test begin.."
|
||||
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
CAS_TEST_DIR=$WORK_DIR/tests/comparisonTest/cassandra
|
||||
|
||||
runTest
|
||||
|
||||
printTo "Cassandra Test done!"
|
|
@ -0,0 +1,196 @@
|
|||
#!/bin/bash
|
||||
|
||||
today=`date +"%Y%m%d"`
|
||||
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.cover
|
||||
TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
function buildTDengine {
|
||||
echo "check if TDengine need build"
|
||||
cd $TDENGINE_DIR
|
||||
git remote prune origin > /dev/null
|
||||
git remote update > /dev/null
|
||||
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
echo " LOCAL: $LOCAL_COMMIT"
|
||||
echo "REMOTE: $REMOTE_COMMIT"
|
||||
|
||||
# reset counter
|
||||
lcov -d . --zerocounters
|
||||
|
||||
cd $TDENGINE_DIR/debug
|
||||
|
||||
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
|
||||
echo "repo up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
git reset --hard
|
||||
git pull
|
||||
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
|
||||
rm -rf *
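# configure a clean build with the COVER and RANDOM_FILE_FAIL cmake options enabled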
|
||||
cmake -DCOVER=true -DRANDOM_FILE_FAIL=true .. > /dev/null
|
||||
make > /dev/null
|
||||
fi
|
||||
|
||||
make install > /dev/null
|
||||
}
|
||||
|
||||
function runGeneralCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^./test.sh* ]]; then
|
||||
general_case=`echo $line | grep -w general`
|
||||
|
||||
if [ -n "$general_case" ]; then
|
||||
case=`echo $line |grep general| awk '{print $NF}'`
|
||||
./test.sh -f $case > /dev/null 2>&1 && \
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
|
||||
echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
fi
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runTest {
|
||||
echo "run Test"
|
||||
|
||||
cd $TDENGINE_DIR/tests/script
|
||||
|
||||
[ -d ../../sim ] && rm -rf ../../sim
|
||||
[ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT
|
||||
|
||||
runGeneralCaseOneByOne jenkins/basic.txt
|
||||
|
||||
totalSuccess=`grep 'success' $TDENGINE_COVERAGE_REPORT | wc -l`
|
||||
|
||||
if [ "$totalSuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalSuccess coverage test case(s) succeed! ### ${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
fi
|
||||
|
||||
totalFailed=`grep 'failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
|
||||
if [ "$totalFailed" -ne "0" ]; then
|
||||
echo -e "${RED} ### Total $totalFailed coverage test case(s) failed! ### ${NC}\n" | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
# exit $totalPyFailed
|
||||
fi
|
||||
|
||||
cd $TDENGINE_DIR/tests
|
||||
rm -rf ../sim
|
||||
./test-all.sh full python | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
|
||||
# Test Connector
|
||||
stopTaosd
|
||||
$TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
|
||||
sleep 10
|
||||
|
||||
cd $TDENGINE_DIR/src/connector/jdbc
|
||||
mvn clean package
|
||||
mvn test | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
|
||||
# Test C Demo
|
||||
stopTaosd
|
||||
$TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
|
||||
sleep 10
|
||||
yes | $TDENGINE_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
|
||||
# Test waltest
|
||||
dataDir=`grep dataDir $TDENGINE_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
|
||||
walDir=`find $dataDir -name "wal"|head -n1`
|
||||
echo "dataDir: $dataDir" | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
echo "walDir: $walDir" | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
if [ -n "$walDir" ]; then
|
||||
yes | $TDENGINE_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
fi
|
||||
|
||||
# run Unit Test
|
||||
echo "Run Unit Test: utilTest, queryTest and cliTest"
|
||||
$TDENGINE_DIR/debug/build/bin/utilTest > /dev/null && echo "utilTest pass!" || echo "utilTest failed!"
|
||||
$TDENGINE_DIR/debug/build/bin/queryTest > /dev/null && echo "queryTest pass!" || echo "queryTest failed!"
|
||||
$TDENGINE_DIR/debug/build/bin/cliTest > /dev/null && echo "cliTest pass!" || echo "cliTest failed!"
|
||||
|
||||
stopTaosd
|
||||
}
|
||||
|
||||
function lcovFunc {
|
||||
echo "collect data by lcov"
|
||||
cd $TDENGINE_DIR
|
||||
|
||||
# collect data
|
||||
lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info
|
||||
|
||||
# remove exclude paths
|
||||
lcov --remove coverage.info \
|
||||
'*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
|
||||
--rc lcov_branch_coverage=1 -o coverage.info
|
||||
|
||||
# generate result
|
||||
lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
|
||||
# push result to coveralls.io
|
||||
coveralls-lcov coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
|
||||
}
|
||||
|
||||
function sendReport {
|
||||
echo "send report"
|
||||
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
|
||||
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
|
||||
|
||||
cd $TDENGINE_DIR
|
||||
|
||||
sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_COVERAGE_REPORT
|
||||
BODY_CONTENT=`cat $TDENGINE_COVERAGE_REPORT`
|
||||
echo -e "to: ${receiver}\nsubject: Coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
|
||||
(cat - && uuencode $TDENGINE_COVERAGE_REPORT coverage-report-$today.log) | \
|
||||
ssmtp "${receiver}" && echo "Report Sent!"
|
||||
}
|
||||
|
||||
function stopTaosd {
|
||||
echo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
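# keep sending SIGTERM until no taosd process remains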
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
}
|
||||
|
||||
function runTestRandomFail {
|
||||
exec_random_fail_sh=$1
|
||||
default_exec_sh=$TDENGINE_DIR/tests/script/sh/exec.sh
|
||||
[ -f $exec_random_fail_sh ] && cp $exec_random_fail_sh $default_exec_sh || exit 1
|
||||
|
||||
dnodes_random_fail_py=$TDENGINE_DIR/tests/pytest/util/dnodes-no-random-fail.py
|
||||
default_dnodes_py=$TDENGINE_DIR/tests/pytest/util/dnodes.py
|
||||
[ -f $dnodes_random_fail_py ] && cp $dnodes_random_fail_py $default_dnodes_py || exit 1
|
||||
|
||||
runTest NoRandomFail
|
||||
}
|
||||
|
||||
WORK_DIR=/home/shuduo/work/taosdata
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "Run Coverage Test" | tee -a $WORK_DIR/cron.log
|
||||
|
||||
rm -f /tmp/core-*
|
||||
|
||||
stopTaosd
|
||||
buildTDengine
|
||||
|
||||
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-random-fail.sh
|
||||
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-default.sh
|
||||
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-no-random-fail.sh
|
||||
|
||||
lcovFunc
|
||||
sendReport
|
||||
stopTaosd
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "End of Coverage Test" | tee -a $WORK_DIR/cron.log
|
|
@ -0,0 +1,191 @@
|
|||
#!/bin/bash
|
||||
|
||||
today=`date +"%Y%m%d"`
|
||||
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.orig
|
||||
TDENGINE_FULLTEST_REPORT=$TDENGINE_DIR/tests/full-report-$today.log
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
function buildTDengine {
|
||||
echo "check if TDengine need build"
|
||||
|
||||
need_rebuild=false
|
||||
|
||||
if [ ! -d $TDENGINE_DIR ]; then
|
||||
echo "No TDengine source code found!"
|
||||
git clone https://github.com/taosdata/TDengine $TDENGINE_DIR
|
||||
need_rebuild=true
|
||||
fi
|
||||
|
||||
cd $TDENGINE_DIR
|
||||
git remote prune origin > /dev/null
|
||||
git remote update > /dev/null
|
||||
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
echo " LOCAL: $LOCAL_COMMIT"
|
||||
echo "REMOTE: $REMOTE_COMMIT"
|
||||
|
||||
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
|
||||
echo "repo up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
git pull
|
||||
need_rebuild=true
|
||||
fi
|
||||
|
||||
[ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
|
||||
cd $TDENGINE_DIR/debug
|
||||
[ -f $TDENGINE_DIR/debug/build/bin/taosd ] || need_rebuild=true
|
||||
|
||||
if $need_rebuild ; then
|
||||
echo "rebuild.."
|
||||
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
rm -rf *
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
fi
|
||||
|
||||
make install > /dev/null
|
||||
}
|
||||
|
||||
function runGeneralCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^./test.sh* ]]; then
|
||||
general_case=`echo $line | grep -w general`
|
||||
|
||||
if [ -n "$general_case" ]; then
|
||||
case=`echo $line | awk '{print $NF}'`
|
||||
|
||||
start_time=`date +%s`
|
||||
./test.sh -f $case > /dev/null 2>&1 && ret=0 || ret=1
|
||||
end_time=`date +%s`
|
||||
|
||||
if [[ $ret -eq 0 ]]; then
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
|
||||
else
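# flatten the case path into a file-name-safe string before archiving the sim logs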
|
||||
casename=`echo $case|sed 's/\//\-/g'`
|
||||
find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
|
||||
echo -e "${RED}$case failed and log saved${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runPyCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^python.* ]]; then
|
||||
if [[ $line != *sleep* ]]; then
|
||||
case=`echo $line|awk '{print $NF}'`
|
||||
start_time=`date +%s`
|
||||
$line > /dev/null 2>&1 && ret=0 || ret=1
|
||||
end_time=`date +%s`
|
||||
|
||||
if [[ $ret -eq 0 ]]; then
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
|
||||
else
|
||||
casename=`echo $case|sed 's/\//\-/g'`
|
||||
find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
|
||||
echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
|
||||
fi
|
||||
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
|
||||
else
|
||||
$line > /dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runTest {
|
||||
echo "Run Test"
|
||||
cd $TDENGINE_DIR/tests/script
|
||||
|
||||
[ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
|
||||
[ -f $TDENGINE_FULLTEST_REPORT ] && rm $TDENGINE_FULLTEST_REPORT
|
||||
|
||||
runGeneralCaseOneByOne jenkins/basic.txt
|
||||
|
||||
totalSuccess=`grep 'success' $TDENGINE_FULLTEST_REPORT | wc -l`
|
||||
|
||||
if [ "$totalSuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalSuccess SIM case(s) succeed! ### ${NC}" \
|
||||
| tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
|
||||
totalFailed=`grep 'failed\|fault' $TDENGINE_FULLTEST_REPORT | wc -l`
|
||||
if [ "$totalFailed" -ne "0" ]; then
|
||||
echo -e "${RED} ### Total $totalFailed SIM case(s) failed! ### ${NC}\n" \
|
||||
| tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
|
||||
cd $TDENGINE_DIR/tests/pytest
|
||||
[ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
|
||||
[ -f pytest-out.log ] && rm -f pytest-out.log
|
||||
runPyCaseOneByOne fulltest.sh
|
||||
|
||||
totalPySuccess=`grep 'success' pytest-out.log | wc -l`
|
||||
totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
|
||||
|
||||
cat pytest-out.log >> $TDENGINE_FULLTEST_REPORT
|
||||
if [ "$totalPySuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
|
||||
| tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
|
||||
if [ "$totalPyFailed" -ne "0" ]; then
|
||||
echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
|
||||
| tee -a $TDENGINE_FULLTEST_REPORT
|
||||
fi
|
||||
}
|
||||
|
||||
function sendReport {
|
||||
echo "Send Report"
|
||||
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
|
||||
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
|
||||
|
||||
cd $TDENGINE_DIR/tests
|
||||
|
||||
sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_FULLTEST_REPORT
|
||||
BODY_CONTENT=`cat $TDENGINE_FULLTEST_REPORT`
|
||||
|
||||
cd $TDENGINE_DIR
|
||||
tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz
|
||||
|
||||
echo -e "to: ${receiver}\nsubject: Full test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
|
||||
(cat - && uuencode $TDENGINE_FULLTEST_REPORT fulltest-report-$today.log) | \
|
||||
(cat - && uuencode $TDENGINE_DIR/fulltest-$today.tar.gz fulltest-$today.tar.gz) | \
|
||||
ssmtp "${receiver}" && echo "Report Sent!"
|
||||
}
|
||||
|
||||
function stopTaosd {
|
||||
echo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
}
|
||||
|
||||
WORK_DIR=/home/shuduo/work/taosdata
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "Run Full Test" | tee -a $WORK_DIR/cron.log
|
||||
|
||||
stopTaosd
|
||||
buildTDengine
|
||||
runTest
|
||||
sendReport
|
||||
stopTaosd
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "End of Full Test" | tee -a $WORK_DIR/cron.log
|
|
@ -0,0 +1,186 @@
|
|||
#!/bin/bash
|
||||
|
||||
today=`date +"%Y%m%d"`
|
||||
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal.cover
|
||||
TDINTERNAL_COVERAGE_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-coverage-report-$today.log
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
function buildTDinternal {
|
||||
echo "check if TDinternal need build"
|
||||
cd $TDINTERNAL_DIR
|
||||
NEED_COMPILE=0
|
||||
# git remote update
|
||||
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
echo " LOCAL: $LOCAL_COMMIT"
|
||||
echo "REMOTE: $REMOTE_COMMIT"
|
||||
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
|
||||
echo "TDinternal repo is up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
# git pull
|
||||
|
||||
# NEED_COMPILE=1
|
||||
fi
|
||||
|
||||
lcov -d . --zerocounters
|
||||
# git submodule update --init --recursive
|
||||
cd $TDINTERNAL_DIR/community
|
||||
TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
|
||||
if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
|
||||
echo "community repo is up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
# git checkout develop
|
||||
# git pull
|
||||
# NEED_COMPILE=1
|
||||
fi
|
||||
|
||||
cd $TDINTERNAL_DIR/debug
|
||||
|
||||
if [[ $NEED_COMPILE -eq 1 ]]; then
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
rm -rf *
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
fi
|
||||
|
||||
make install > /dev/null
|
||||
}
|
||||
|
||||
function runUniqueCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^./test.sh* ]]; then
|
||||
case=`echo $line | awk '{print $NF}'`
|
||||
start_time=`date +%s`
|
||||
./test.sh -f $case > /dev/null 2>&1 && \
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT || \
|
||||
echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
end_time=`date +%s`
|
||||
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runTest {
|
||||
echo "Run Test"
|
||||
cd $TDINTERNAL_DIR/community/tests/script
|
||||
[ -d ../../sim ] && rm -rf ../../sim
|
||||
|
||||
[ -f $TDINTERNAL_COVERAGE_REPORT ] && rm $TDINTERNAL_COVERAGE_REPORT
|
||||
|
||||
runUniqueCaseOneByOne jenkins/basic.txt
|
||||
|
||||
totalSuccess=`grep 'success' $TDINTERNAL_COVERAGE_REPORT | wc -l`
|
||||
|
||||
if [ "$totalSuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
fi
|
||||
|
||||
totalFailed=`grep 'failed\|fault' $TDINTERNAL_COVERAGE_REPORT | wc -l`
|
||||
if [ "$totalFailed" -ne "0" ]; then
|
||||
echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
# exit $totalPyFailed
|
||||
fi
|
||||
|
||||
# Test Python test case
|
||||
cd $TDINTERNAL_DIR/community/tests
|
||||
/usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
|
||||
# Test Connector
|
||||
stopTaosd
|
||||
$TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
|
||||
sleep 10
|
||||
|
||||
cd $TDINTERNAL_DIR/community/src/connector/jdbc
|
||||
mvn clean package
|
||||
mvn test | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
|
||||
# Test C Demo
|
||||
stopTaosd
|
||||
$TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
|
||||
sleep 10
|
||||
yes | $TDINTERNAL_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
|
||||
# Test waltest
|
||||
dataDir=`grep dataDir $TDINTERNAL_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
|
||||
walDir=`find $dataDir -name "wal"|head -n1`
|
||||
echo "dataDir: $dataDir\nwalDir: $walDir" | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
if [ -n "$walDir" ]; then
|
||||
yes | $TDINTERNAL_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDINTERNAL_COVERAGE_REPORT
|
||||
fi
|
||||
|
||||
stopTaosd
|
||||
}
|
||||
|
||||
function sendReport {
|
||||
echo "Send Report"
|
||||
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
|
||||
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
|
||||
|
||||
cd $TDINTERNAL_DIR
|
||||
|
||||
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT
|
||||
|
||||
BODY_CONTENT=`cat $TDINTERNAL_COVERAGE_REPORT`
|
||||
echo -e "to: ${receiver}\nsubject: TDinternal coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
|
||||
(cat - && uuencode tdinternal-coverage-report-$today.tar.gz tdinternal-coverage-report-$today.tar.gz) | \
|
||||
(cat - && uuencode $TDINTERNAL_COVERAGE_REPORT tdinternal-coverage-report-$today.log) | \
|
||||
ssmtp "${receiver}" && echo "Report Sent!"
|
||||
}
|
||||
|
||||
function lcovFunc {
|
||||
echo "collect data by lcov"
|
||||
cd $TDINTERNAL_DIR
|
||||
|
||||
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT
|
||||
# collect data
|
||||
lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDINTERNAL_DIR -o coverage.info
|
||||
|
||||
# remove exclude paths
|
||||
lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
|
||||
--rc lcov_branch_coverage=1 -o coverage.info
|
||||
|
||||
# generate result
|
||||
lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDINTERNAL_COVERAGE_REPORT
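# render an HTML report from the captured coverage data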
|
||||
|
||||
genhtml -o html coverage.info
|
||||
|
||||
tar czf tdinternal-coverage-report-$today.tar.gz html coverage.info $TDINTERNAL_COVERAGE_REPORT
|
||||
# push result to coveralls.io
|
||||
# coveralls-lcov coverage.info | tee -a tdinternal-coverage-report-$today.log
|
||||
}
|
||||
|
||||
function stopTaosd {
|
||||
echo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
}
|
||||
|
||||
WORK_DIR=/home/shuduo/work/taosdata
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "Run Coverage Test for TDinternal" | tee -a $WORK_DIR/cron.log
|
||||
|
||||
stopTaosd
|
||||
buildTDinternal
|
||||
runTest
|
||||
lcovFunc
|
||||
sendReport
|
||||
stopTaosd
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "End of TDinternal Coverage Test" | tee -a $WORK_DIR/cron.log
|
|
@ -0,0 +1,179 @@
|
|||
#!/bin/bash
|
||||
|
||||
today=`date +"%Y%m%d"`
|
||||
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal
|
||||
TDINTERNAL_TEST_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-report-$today.log
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
function buildTDinternal {
|
||||
echo "check if TDinternal need build"
|
||||
cd $TDINTERNAL_DIR
|
||||
NEED_COMPILE=0
|
||||
# git remote update
|
||||
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
echo " LOCAL: $LOCAL_COMMIT"
|
||||
echo "REMOTE: $REMOTE_COMMIT"
|
||||
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
|
||||
echo "TDinternal repo is up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
# git pull
|
||||
|
||||
# NEED_COMPILE=1
|
||||
fi
|
||||
|
||||
# git submodule update --init --recursive
|
||||
cd $TDINTERNAL_DIR/community
|
||||
TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
|
||||
TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
|
||||
if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
|
||||
echo "community repo is up-to-date"
|
||||
else
|
||||
echo "repo need to pull"
|
||||
# git checkout develop
|
||||
# git pull
|
||||
# NEED_COMPILE=1
|
||||
fi
|
||||
|
||||
cd $TDINTERNAL_DIR/debug
|
||||
|
||||
if [[ $NEED_COMPILE -eq 1 ]]; then
|
||||
LOCAL_COMMIT=`git rev-parse --short @`
|
||||
rm -rf *
|
||||
cmake .. > /dev/null
|
||||
make > /dev/null
|
||||
fi
|
||||
|
||||
make install > /dev/null
|
||||
}
|
||||
|
||||
function runUniqueCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^./test.sh* ]]; then
|
||||
case=`echo $line | awk '{print $NF}'`
|
||||
start_time=`date +%s`
|
||||
./test.sh -f $case > /dev/null 2>&1 && \
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_TEST_REPORT || \
|
||||
echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_TEST_REPORT
|
||||
end_time=`date +%s`
|
||||
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_TEST_REPORT
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runPyCaseOneByOne {
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^python.* ]]; then
|
||||
if [[ $line != *sleep* ]]; then
|
||||
case=`echo $line|awk '{print $NF}'`
|
||||
start_time=`date +%s`
|
||||
$line > /dev/null 2>&1 && ret=0 || ret=1
|
||||
end_time=`date +%s`
|
||||
|
||||
if [[ $ret -eq 0 ]]; then
|
||||
echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
|
||||
else
|
||||
casename=`echo $case|sed 's/\//\-/g'`
|
||||
find $TDINTERNAL_DIR/community/sim -name "*log" -exec tar czf $TDINTERNAL_DIR/fulltest-$today-$casename.log.tar.gz {} +
|
||||
echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
|
||||
fi
|
||||
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
|
||||
else
|
||||
$line > /dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
done < $1
|
||||
}
|
||||
|
||||
function runTest {
|
||||
echo "Run Test"
|
||||
cd $TDINTERNAL_DIR/community/tests/script
|
||||
[ -d $TDINTERNAL_DIR/sim ] && rm -rf $TDINTERNAL_DIR/sim
|
||||
|
||||
[ -f $TDINTERNAL_TEST_REPORT ] && rm $TDINTERNAL_TEST_REPORT
|
||||
|
||||
runUniqueCaseOneByOne jenkins/basic.txt
|
||||
|
||||
totalSuccess=`grep 'success' $TDINTERNAL_TEST_REPORT | wc -l`
|
||||
|
||||
if [ "$totalSuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_TEST_REPORT
|
||||
fi
|
||||
|
||||
totalFailed=`grep 'failed\|fault' $TDINTERNAL_TEST_REPORT | wc -l`
|
||||
if [ "$totalFailed" -ne "0" ]; then
|
||||
echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_TEST_REPORT
|
||||
# exit $totalPyFailed
|
||||
fi
|
||||
|
||||
cd $TDINTERNAL_DIR/community/tests/pytest
|
||||
[ -d $TDINTERNAL_DIR/community/sim ] && rm -rf $TDINTERNAL_DIR/community/sim
|
||||
[ -f pytest-out.log ] && rm -f pytest-out.log
|
||||
|
||||
/usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_TEST_REPORT
|
||||
runPyCaseOneByOne fulltest.sh
|
||||
|
||||
totalPySuccess=`grep 'success' pytest-out.log | wc -l`
|
||||
totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
|
||||
|
||||
cat pytest-out.log >> $TDINTERNAL_TEST_REPORT
|
||||
if [ "$totalPySuccess" -gt "0" ]; then
|
||||
echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
|
||||
| tee -a $TDINTERNAL_TEST_REPORT
|
||||
fi
|
||||
|
||||
if [ "$totalPyFailed" -ne "0" ]; then
|
||||
echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
|
||||
| tee -a $TDINTERNAL_TEST_REPORT
|
||||
fi
|
||||
}
|
||||
|
||||
function sendReport {
|
||||
echo "Send Report"
|
||||
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
|
||||
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
|
||||
|
||||
cd $TDINTERNAL_DIR
|
||||
|
||||
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_TEST_REPORT
|
||||
BODY_CONTENT=`cat $TDINTERNAL_TEST_REPORT`
|
||||
|
||||
cd $TDINTERNAL_DIR
|
||||
tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz
|
||||
|
||||
echo -e "to: ${receiver}\nsubject: TDinternal test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
|
||||
(cat - && uuencode $TDINTERNAL_TEST_REPORT tdinternal-report-$today.log) | \
|
||||
ssmtp "${receiver}" && echo "Report Sent!"
|
||||
}
|
||||
|
||||
function stopTaosd {
|
||||
echo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -KILL -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
}
|
||||
|
||||
WORK_DIR=/home/shuduo/work/taosdata
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "Run Test for TDinternal" | tee -a $WORK_DIR/cron.log
|
||||
|
||||
buildTDinternal
|
||||
runTest
|
||||
sendReport
|
||||
stopTaosd
|
||||
|
||||
date >> $WORK_DIR/cron.log
|
||||
echo "End of TDinternal Test" | tee -a $WORK_DIR/cron.log
|
|
@ -144,6 +144,7 @@ python3 ./test.py -f query/querySort.py
|
|||
python3 ./test.py -f query/queryJoin.py
|
||||
python3 ./test.py -f query/select_last_crash.py
|
||||
python3 ./test.py -f query/queryNullValueTest.py
|
||||
python3 ./test.py -f query/queryInsertValue.py
|
||||
|
||||
#stream
|
||||
python3 ./test.py -f stream/metric_1.py
|
||||
|
@ -161,3 +162,23 @@ python3 ./test.py -f client/client.py
|
|||
# Misc
|
||||
python3 testCompress.py
|
||||
python3 testNoCompress.py
|
||||
python3 testMinTablesPerVnode.py
|
||||
|
||||
# functions
|
||||
python3 ./test.py -f functions/function_avg.py
|
||||
python3 ./test.py -f functions/function_bottom.py
|
||||
python3 ./test.py -f functions/function_count.py
|
||||
python3 ./test.py -f functions/function_diff.py
|
||||
python3 ./test.py -f functions/function_first.py
|
||||
python3 ./test.py -f functions/function_last.py
|
||||
python3 ./test.py -f functions/function_last_row.py
|
||||
python3 ./test.py -f functions/function_leastsquares.py
|
||||
python3 ./test.py -f functions/function_max.py
|
||||
python3 ./test.py -f functions/function_min.py
|
||||
python3 ./test.py -f functions/function_operations.py
|
||||
python3 ./test.py -f functions/function_percentile.py
|
||||
python3 ./test.py -f functions/function_spread.py
|
||||
python3 ./test.py -f functions/function_stddev.py
|
||||
python3 ./test.py -f functions/function_sum.py
|
||||
python3 ./test.py -f functions/function_top.py
|
||||
python3 ./test.py -f functions/function_twa.py
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
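# base timestamp (in milliseconds) used to generate consecutive rows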
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# average verification
|
||||
tdSql.error("select avg(ts) from test")
|
||||
tdSql.error("select avg(ts) from test1")
|
||||
tdSql.error("select avg(col7) from test")
|
||||
tdSql.error("select avg(col7) from test1")
|
||||
tdSql.error("select avg(col8) from test")
|
||||
tdSql.error("select avg(col8) from test1")
|
||||
tdSql.error("select avg(col9) from test")
|
||||
tdSql.error("select avg(col9) from test1")
|
||||
|
||||
tdSql.query("select avg(col1) from test")
|
||||
tdSql.checkData(0, 0, np.average(intData))
|
||||
tdSql.query("select avg(col2) from test")
|
||||
tdSql.checkData(0, 0, np.average(intData))
|
||||
tdSql.query("select avg(col3) from test")
|
||||
tdSql.checkData(0, 0, np.average(intData))
|
||||
tdSql.query("select avg(col4) from test")
|
||||
tdSql.checkData(0, 0, np.average(intData))
|
||||
tdSql.query("select avg(col5) from test")
|
||||
tdSql.checkData(0, 0, np.average(floatData))
|
||||
tdSql.query("select avg(col6) from test")
|
||||
tdSql.checkData(0, 0, np.average(floatData))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,93 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
# bottom verification
|
||||
tdSql.error("select bottom(ts, 10) from test")
|
||||
tdSql.error("select bottom(col1, 0) from test")
|
||||
tdSql.error("select bottom(col1, 101) from test")
|
||||
tdSql.error("select bottom(col2, 0) from test")
|
||||
tdSql.error("select bottom(col2, 101) from test")
|
||||
tdSql.error("select bottom(col3, 0) from test")
|
||||
tdSql.error("select bottom(col3, 101) from test")
|
||||
tdSql.error("select bottom(col4, 0) from test")
|
||||
tdSql.error("select bottom(col4, 101) from test")
|
||||
tdSql.error("select bottom(col5, 0) from test")
|
||||
tdSql.error("select bottom(col5, 101) from test")
|
||||
tdSql.error("select bottom(col6, 0) from test")
|
||||
tdSql.error("select bottom(col6, 101) from test")
|
||||
tdSql.error("select bottom(col7, 10) from test")
|
||||
tdSql.error("select bottom(col8, 10) from test")
|
||||
tdSql.error("select bottom(col9, 10) from test")
|
||||
|
||||
tdSql.query("select bottom(col1, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 2)
|
||||
|
||||
tdSql.query("select bottom(col2, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 2)
|
||||
|
||||
tdSql.query("select bottom(col3, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 2)
|
||||
|
||||
tdSql.query("select bottom(col4, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 2)
|
||||
|
||||
tdSql.query("select bottom(col5, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 0.1)
|
||||
tdSql.checkData(1, 1, 1.1)
|
||||
|
||||
tdSql.query("select bottom(col6, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 0.1)
|
||||
tdSql.checkData(1, 1, 1.1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,79 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
# Count verification
|
||||
tdSql.query("select count(*) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select count(ts) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col1) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col2) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col3) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col4) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col5) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col6) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col7) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col8) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select count(col9) from test")
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.execute("alter table test add column col10 int")
|
||||
tdSql.query("select count(col10) from test")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.execute("insert into test1 values(now, 1, 2, 3, 4, 1.1, 2.2, false, 'test', 'test' 1)")
|
||||
tdSql.query("select count(col10) from test")
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,99 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))
|
||||
|
||||
# diff verification
|
||||
tdSql.query("select diff(col1) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select diff(col2) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select diff(col3) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select diff(col4) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select diff(col5) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select diff(col6) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
tdSql.error("select diff(ts) from test")
|
||||
tdSql.error("select diff(ts) from test1")
|
||||
tdSql.error("select diff(col1) from test")
|
||||
tdSql.error("select diff(col2) from test")
|
||||
tdSql.error("select diff(col3) from test")
|
||||
tdSql.error("select diff(col4) from test")
|
||||
tdSql.error("select diff(col5) from test")
|
||||
tdSql.error("select diff(col6) from test")
|
||||
tdSql.error("select diff(col7) from test")
|
||||
tdSql.error("select diff(col7) from test1")
|
||||
tdSql.error("select diff(col8) from test")
|
||||
tdSql.error("select diff(col8) from test1")
|
||||
tdSql.error("select diff(col9) from test")
|
||||
tdSql.error("select diff(col9) from test1")
|
||||
|
||||
tdSql.query("select diff(col1) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.query("select diff(col2) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.query("select diff(col3) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.query("select diff(col4) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.query("select diff(col5) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
tdSql.query("select diff(col6) from test1")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,119 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
|
||||
|
||||
# first verification
|
||||
tdSql.query("select first(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, None)
|
||||
|
||||
tdSql.query("select first(col1) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col2) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col3) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col4) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col5) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col6) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col7) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col8) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select first(col9) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
tdSql.query("select first(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
|
||||
tdSql.query("select first(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select first(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select first(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select first(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 1)
|
||||
|
||||
tdSql.query("select first(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0.1)
|
||||
|
||||
tdSql.query("select first(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0.1)
|
||||
|
||||
tdSql.query("select first(col7) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, False)
|
||||
|
||||
tdSql.query("select first(col8) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 'taosdata1')
|
||||
|
||||
tdSql.query("select first(col9) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, '涛思数据1')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,119 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
|
||||
|
||||
# last verification
|
||||
tdSql.query("select last(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, None)
|
||||
|
||||
tdSql.query("select last(col1) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col2) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col3) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col4) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col5) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col6) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col7) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col8) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select last(col9) from test1")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
tdSql.query("select last(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, 10)
|
||||
|
||||
tdSql.query("select last(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
tdSql.query("select last(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
tdSql.query("select last(col7) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, True)
|
||||
|
||||
tdSql.query("select last(col8) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 'taosdata10')
|
||||
|
||||
tdSql.query("select last(col9) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, '涛思数据10')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,128 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1))
|
||||
|
||||
# last_row verification
|
||||
tdSql.query("select last_row(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, None)
|
||||
|
||||
tdSql.query("select last_row(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col7) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col8) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
tdSql.query("select last_row(col9) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
tdSql.query("select last_row(*) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 1, 10)
|
||||
|
||||
tdSql.query("select last_row(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last_row(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last_row(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last_row(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select last_row(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
tdSql.query("select last_row(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
tdSql.query("select last_row(col7) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, True)
|
||||
|
||||
tdSql.query("select last_row(col8) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 'taosdata10')
|
||||
|
||||
tdSql.query("select last_row(col9) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, '涛思数据10')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,75 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
# leastsquares verification
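# the second and third arguments give the start value and step of the x sequence used for the linear fit,
# so values 1..10 against x = 1, 2, ... should yield slope 1 and intercept 0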
|
||||
tdSql.error("select leastsquares(ts, 1, 1) from test1")
|
||||
tdSql.error("select leastsquares(col1, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col2, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col3, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col4, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col5, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col6, 1, 1) from test")
|
||||
tdSql.error("select leastsquares(col7, 1, 1) from test1")
|
||||
tdSql.error("select leastsquares(col8, 1, 1) from test1")
|
||||
tdSql.error("select leastsquares(col9, 1, 1) from test1")
|
||||
|
||||
tdSql.query("select leastsquares(col1, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
|
||||
|
||||
tdSql.query("select leastsquares(col2, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
|
||||
|
||||
tdSql.query("select leastsquares(col3, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
|
||||
|
||||
tdSql.query("select leastsquares(col4, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:0.000000}')
|
||||
|
||||
tdSql.query("select leastsquares(col5, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}')
|
||||
|
||||
tdSql.query("select leastsquares(col6, 1, 1) from test1")
|
||||
tdSql.checkData(0, 0, '{slop:1.000000, intercept:-0.900000}')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,78 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# max verification
|
||||
tdSql.error("select max(ts) from test")
|
||||
tdSql.error("select max(ts) from test1")
|
||||
tdSql.error("select max(col7) from test")
|
||||
tdSql.error("select max(col7) from test1")
|
||||
tdSql.error("select max(col8) from test")
|
||||
tdSql.error("select max(col8) from test1")
|
||||
tdSql.error("select max(col9) from test")
|
||||
tdSql.error("select max(col9) from test1")
|
||||
|
||||
tdSql.query("select max(col1) from test1")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
|
||||
tdSql.query("select max(col2) from test1")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
|
||||
tdSql.query("select max(col3) from test1")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
|
||||
tdSql.query("select max(col4) from test1")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
|
||||
tdSql.query("select max(col5) from test1")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
|
||||
tdSql.query("select max(col6) from test1")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,78 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# min verification
|
||||
tdSql.error("select min(ts) from test")
|
||||
tdSql.error("select min(ts) from test1")
|
||||
tdSql.error("select min(col7) from test")
|
||||
tdSql.error("select min(col7) from test1")
|
||||
tdSql.error("select min(col8) from test")
|
||||
tdSql.error("select min(col8) from test1")
|
||||
tdSql.error("select min(col9) from test")
|
||||
tdSql.error("select min(col9) from test1")
|
||||
|
||||
tdSql.query("select min(col1) from test1")
|
||||
tdSql.checkData(0, 0, np.min(intData))
|
||||
|
||||
tdSql.query("select min(col2) from test1")
|
||||
tdSql.checkData(0, 0, np.min(intData))
|
||||
|
||||
tdSql.query("select min(col3) from test1")
|
||||
tdSql.checkData(0, 0, np.min(intData))
|
||||
|
||||
tdSql.query("select min(col4) from test1")
|
||||
tdSql.checkData(0, 0, np.min(intData))
|
||||
|
||||
tdSql.query("select min(col5) from test1")
|
||||
tdSql.checkData(0, 0, np.min(floatData))
|
||||
|
||||
tdSql.query("select min(col6) from test1")
|
||||
tdSql.checkData(0, 0, np.min(floatData))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,81 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
# arithmetic operations verification
|
||||
tdSql.error("select ts + col1 from test")
|
||||
tdSql.error("select ts + col1 from test1")
|
||||
tdSql.error("select col1 + col7 from test")
|
||||
tdSql.error("select col1 + col7 from test1")
|
||||
tdSql.error("select col1 + col8 from test")
|
||||
tdSql.error("select col1 + col8 from test1")
|
||||
tdSql.error("select col1 + col9 from test")
|
||||
tdSql.error("select col1 + col9 from test1")
|
||||
|
||||
tdSql.query("select col1 + col2 from test1")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.checkData(0, 0, 2.0)
|
||||
|
||||
tdSql.query("select col1 + col2 * col3 from test1")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.checkData(1, 0, 6.0)
|
||||
|
||||
tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.checkData(0, 0, 3.2)
|
||||
|
||||
tdSql.execute("insert into test1(ts, col1) values(%d, 11)" % (self.ts + 11))
|
||||
tdSql.query("select col1 + col2 from test1")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkData(10, 0, None)
|
||||
|
||||
tdSql.query("select col1 + col2 * col3 from test1")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkData(10, 0, None)
|
||||
|
||||
tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 from test1")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkData(10, 0, None)
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,140 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20))''')
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# percentile verification
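# apercentile() returns an approximation, so its results are only printed for inspection instead of being checked against numpy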
|
||||
tdSql.error("select percentile(ts 20) from test")
|
||||
tdSql.error("select apercentile(ts 20) from test")
|
||||
tdSql.error("select percentile(col7 20) from test")
|
||||
tdSql.error("select apercentile(col7 20) from test")
|
||||
tdSql.error("select percentile(col8 20) from test")
|
||||
tdSql.error("select apercentile(col8 20) from test")
|
||||
tdSql.error("select percentile(col9 20) from test")
|
||||
tdSql.error("select apercentile(col9 20) from test")
|
||||
|
||||
tdSql.query("select percentile(col1, 0) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 0))
|
||||
tdSql.query("select apercentile(col1, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col1, 50) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 50))
|
||||
tdSql.query("select apercentile(col1, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col1, 100) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 100))
|
||||
tdSql.query("select apercentile(col1, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
tdSql.query("select percentile(col2, 0) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 0))
|
||||
tdSql.query("select apercentile(col2, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col2, 50) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 50))
|
||||
tdSql.query("select apercentile(col2, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col2, 100) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 100))
|
||||
tdSql.query("select apercentile(col2, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
tdSql.query("select percentile(col3, 0) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 0))
|
||||
tdSql.query("select apercentile(col3, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col3, 50) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 50))
|
||||
tdSql.query("select apercentile(col3, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col3, 100) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 100))
|
||||
tdSql.query("select apercentile(col3, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
tdSql.query("select percentile(col4, 0) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 0))
|
||||
tdSql.query("select apercentile(col4, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col4, 50) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 50))
|
||||
tdSql.query("select apercentile(col4, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col4, 100) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(intData, 100))
|
||||
tdSql.query("select apercentile(col4, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
tdSql.query("select percentile(col5, 0) from test")
|
||||
print("query result: %s" % tdSql.getData(0, 0))
|
||||
print("array result: %s" % np.percentile(floatData, 0))
|
||||
tdSql.query("select apercentile(col5, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col5, 50) from test")
|
||||
print("query result: %s" % tdSql.getData(0, 0))
|
||||
print("array result: %s" % np.percentile(floatData, 50))
|
||||
tdSql.query("select apercentile(col5, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col5, 100) from test")
|
||||
print("query result: %s" % tdSql.getData(0, 0))
|
||||
print("array result: %s" % np.percentile(floatData, 100))
|
||||
tdSql.query("select apercentile(col5, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
tdSql.query("select percentile(col6, 0) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(floatData, 0))
|
||||
tdSql.query("select apercentile(col6, 0) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col6, 50) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(floatData, 50))
|
||||
tdSql.query("select apercentile(col6, 50) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
tdSql.query("select percentile(col6, 100) from test")
|
||||
tdSql.checkData(0, 0, np.percentile(floatData, 100))
|
||||
tdSql.query("select apercentile(col6, 100) from test")
|
||||
print("apercentile result: %s" % tdSql.getData(0, 0))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,106 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
tdSql.execute("insert into test1 values(%d, 0, 0, 0, 0, 0.0, 0.0, False, ' ', ' ')" % (self.ts - 1))
|
||||
|
||||
# spread verification
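# spread() is the difference between the maximum and minimum values, so a single row gives 0 for every numeric column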
|
||||
tdSql.query("select spread(ts) from test1")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query("select spread(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
tdSql.query("select spread(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
tdSql.query("select spread(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
tdSql.query("select spread(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
tdSql.query("select spread(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
tdSql.query("select spread(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 0)
|
||||
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
|
||||
tdSql.error("select spread(col7) from test")
|
||||
tdSql.error("select spread(col7) from test1")
|
||||
tdSql.error("select spread(col8) from test")
|
||||
tdSql.error("select spread(col8) from test1")
|
||||
tdSql.error("select spread(col9) from test")
|
||||
tdSql.error("select spread(col9) from test1")
|
||||
|
||||
tdSql.query("select spread(col1) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select spread(col2) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select spread(col3) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select spread(col4) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 10)
|
||||
|
||||
tdSql.query("select spread(col5) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
tdSql.query("select spread(col6) from test1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, 9.1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,80 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# stddev verification
|
||||
tdSql.error("select stddev(ts) from test1")
|
||||
tdSql.error("select stddev(col1) from test")
|
||||
tdSql.error("select stddev(col2) from test")
|
||||
tdSql.error("select stddev(col3) from test")
|
||||
tdSql.error("select stddev(col4) from test")
|
||||
tdSql.error("select stddev(col5) from test")
|
||||
tdSql.error("select stddev(col6) from test")
|
||||
tdSql.error("select stddev(col7) from test1")
|
||||
tdSql.error("select stddev(col8) from test1")
|
||||
tdSql.error("select stddev(col9) from test1")
|
||||
|
||||
tdSql.query("select stddev(col1) from test1")
|
||||
tdSql.checkData(0, 0, np.std(intData))
|
||||
|
||||
tdSql.query("select stddev(col2) from test1")
|
||||
tdSql.checkData(0, 0, np.std(intData))
|
||||
|
||||
tdSql.query("select stddev(col3) from test1")
|
||||
tdSql.checkData(0, 0, np.std(intData))
|
||||
|
||||
tdSql.query("select stddev(col4) from test1")
|
||||
tdSql.checkData(0, 0, np.std(intData))
|
||||
|
||||
tdSql.query("select stddev(col5) from test1")
|
||||
tdSql.checkData(0, 0, np.std(floatData))
|
||||
|
||||
tdSql.query("select stddev(col6) from test1")
|
||||
tdSql.checkData(0, 0, np.std(floatData))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,69 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# sum verification
|
||||
tdSql.error("select sum(ts) from test")
|
||||
tdSql.error("select sum(col7) from test")
|
||||
tdSql.error("select sum(col8) from test")
|
||||
tdSql.error("select sum(col9) from test")
|
||||
|
||||
tdSql.query("select sum(col1) from test")
|
||||
tdSql.checkData(0, 0, np.sum(intData))
|
||||
tdSql.query("select sum(col2) from test")
|
||||
tdSql.checkData(0, 0, np.sum(intData))
|
||||
tdSql.query("select sum(col3) from test")
|
||||
tdSql.checkData(0, 0, np.sum(intData))
|
||||
tdSql.query("select sum(col4) from test")
|
||||
tdSql.checkData(0, 0, np.sum(intData))
|
||||
tdSql.query("select sum(col5) from test")
|
||||
tdSql.checkData(0, 0, np.sum(floatData))
|
||||
tdSql.query("select sum(col6) from test")
|
||||
tdSql.checkData(0, 0, np.sum(floatData))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,98 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# top verification
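# top() rejects the timestamp, bool, binary and nchar columns as well as out-of-range k values (0 and 101 below)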
|
||||
tdSql.error("select top(ts, 10) from test")
|
||||
tdSql.error("select top(col1, 0) from test")
|
||||
tdSql.error("select top(col1, 101) from test")
|
||||
tdSql.error("select top(col2, 0) from test")
|
||||
tdSql.error("select top(col2, 101) from test")
|
||||
tdSql.error("select top(col3, 0) from test")
|
||||
tdSql.error("select top(col3, 101) from test")
|
||||
tdSql.error("select top(col4, 0) from test")
|
||||
tdSql.error("select top(col4, 101) from test")
|
||||
tdSql.error("select top(col5, 0) from test")
|
||||
tdSql.error("select top(col5, 101) from test")
|
||||
tdSql.error("select top(col6, 0) from test")
|
||||
tdSql.error("select top(col6, 101) from test")
|
||||
tdSql.error("select top(col7, 10) from test")
|
||||
tdSql.error("select top(col8, 10) from test")
|
||||
tdSql.error("select top(col9, 10) from test")
|
||||
|
||||
tdSql.query("select top(col1, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 9)
|
||||
tdSql.checkData(1, 1, 10)
|
||||
|
||||
tdSql.query("select top(col2, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 9)
|
||||
tdSql.checkData(1, 1, 10)
|
||||
|
||||
tdSql.query("select top(col3, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 9)
|
||||
tdSql.checkData(1, 1, 10)
|
||||
|
||||
tdSql.query("select top(col4, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 9)
|
||||
tdSql.checkData(1, 1, 10)
|
||||
|
||||
tdSql.query("select top(col5, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 8.1)
|
||||
tdSql.checkData(1, 1, 9.1)
|
||||
|
||||
tdSql.query("select top(col6, 2) from test")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 8.1)
|
||||
tdSql.checkData(1, 1, 9.1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,135 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.rowNum = 10
|
||||
self.ts = 1537146000000
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
intData = []
|
||||
floatData = []
|
||||
|
||||
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
|
||||
col7 bool, col8 binary(20), col9 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table test1 using test tags('beijing')")
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
intData.append(i + 1)
|
||||
floatData.append(i + 0.1)
|
||||
|
||||
# twa verification
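# twa() needs an explicit time window: queries without both a lower and an upper bound on ts are expected to fail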
|
||||
tdSql.error("select twa(ts) from test")
|
||||
tdSql.error("select twa(ts) from test1")
|
||||
|
||||
tdSql.error("select twa(col1) from test")
|
||||
tdSql.error("select twa(col1) from test1")
|
||||
|
||||
tdSql.error("select twa(col2) from test")
|
||||
tdSql.error("select twa(col2) from test1")
|
||||
|
||||
tdSql.error("select twa(col3) from test")
|
||||
tdSql.error("select twa(col3) from test1")
|
||||
|
||||
tdSql.error("select twa(col4) from test")
|
||||
tdSql.error("select twa(col4) from test1")
|
||||
|
||||
tdSql.error("select twa(col5) from test")
|
||||
tdSql.error("select twa(col5) from test1")
|
||||
|
||||
tdSql.error("select twa(col6) from test")
|
||||
tdSql.error("select twa(col6) from test1")
|
||||
|
||||
tdSql.error("select twa(col7) from test")
|
||||
tdSql.error("select twa(col7) from test1")
|
||||
|
||||
tdSql.error("select twa(col8) from test")
|
||||
tdSql.error("select twa(col8) from test1")
|
||||
|
||||
tdSql.error("select twa(col9) from test")
|
||||
tdSql.error("select twa(col9) from test1")
|
||||
|
||||
tdSql.error("select twa(col1) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col1) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col2) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col2) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col3) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col3) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col4) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col4) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col5) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col5) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col6) from test where ts > %d" % self.ts)
|
||||
tdSql.error("select twa(col6) from test1 where ts > %d" % self.ts)
|
||||
|
||||
tdSql.error("select twa(col1) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col1) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.error("select twa(col2) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col2) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.error("select twa(col3) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col3) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.error("select twa(col4) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col4) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.error("select twa(col5) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col5) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.error("select twa(col6) from test where ts < %d" % (self.ts + self.rowNum))
|
||||
tdSql.error("select twa(col6) from test1 where ts < %d" % (self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col1) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col1) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col2) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col2) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col3) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col3) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col4) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col4) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col5) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col5) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
tdSql.query("select twa(col6) from test where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
tdSql.query("select twa(col6) from test1 where ts > %d and ts < %d" % (self.ts, self.ts + self.rowNum))
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -231,10 +231,12 @@ class TDTestCase:
|
|||
tdSql.error("select * from st where tagcol1 like '____'")
|
||||
|
||||
# > for nchar type on tag
|
||||
tdSql.error("select * from st where tagcol2 > 'table'")
|
||||
tdSql.query("select * from st where tagcol2 > 'table1'")
|
||||
tdSql.checkRows(5)
|
||||
|
||||
# >= for nchar type on tag
|
||||
tdSql.error("select * from st where tagcol2 >= 'table'")
|
||||
tdSql.query("select * from st where tagcol2 >= 'table1'")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
# = for nchar type on tag
|
||||
tdSql.query("select * from st where tagcol2 = 'table1'")
|
||||
|
@ -249,10 +251,12 @@ class TDTestCase:
|
|||
tdSql.checkRows(10)
|
||||
|
||||
# < for nchar type on tag
|
||||
tdSql.error("select * from st where tagcol2 < 'table'")
|
||||
tdSql.query("select * from st where tagcol2 < 'table'")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
# <= for nchar type on tag
|
||||
tdSql.error("select * from st where tagcol2 <= 'table'")
|
||||
tdSql.query("select * from st where tagcol2 <= 'table'")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
# % for nchar type on tag case 1
|
||||
tdSql.query("select * from st where tagcol2 like '%'")
|
||||
|
@ -291,10 +295,12 @@ class TDTestCase:
|
|||
tdSql.checkRows(10)
|
||||
|
||||
# > for binary type on tag
|
||||
tdSql.error("select * from st where tagcol3 > '表'")
|
||||
tdSql.query("select * from st where tagcol3 > '表'")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
# >= for binary type on tag
|
||||
tdSql.error("select * from st where tagcol3 >= '表'")
|
||||
tdSql.query("select * from st where tagcol3 >= '表'")
|
||||
tdSql.checkRows(10)
|
||||
|
||||
# = for binary type on tag
|
||||
tdSql.query("select * from st where tagcol3 = '水表'")
|
||||
|
@ -309,10 +315,12 @@ class TDTestCase:
|
|||
tdSql.checkRows(5)
|
||||
|
||||
# < for binary type on tag
|
||||
tdSql.error("select * from st where tagcol3 < '水表'")
|
||||
tdSql.query("select * from st where tagcol3 < '水表'")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
# <= for binary type on tag
|
||||
tdSql.error("select * from st where tagcol3 <= '水表'")
|
||||
tdSql.query("select * from st where tagcol3 <= '水表'")
|
||||
tdSql.checkRows(5)
|
||||
|
||||
# % for binary type on tag case 1
|
||||
tdSql.query("select * from st where tagcol3 like '%'")
|
||||
|
|
|
@@ -141,6 +141,7 @@ python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
python3 ./test.py -f query/queryInsertValue.py

#stream
python3 ./test.py -f stream/stream1.py

@@ -155,3 +156,23 @@ python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py


# functions
python3 ./test.py -f functions/function_avg.py
python3 ./test.py -f functions/function_bottom.py
python3 ./test.py -f functions/function_count.py
python3 ./test.py -f functions/function_diff.py
python3 ./test.py -f functions/function_first.py
python3 ./test.py -f functions/function_last.py
python3 ./test.py -f functions/function_last_row.py
python3 ./test.py -f functions/function_leastsquares.py
python3 ./test.py -f functions/function_max.py
python3 ./test.py -f functions/function_min.py
python3 ./test.py -f functions/function_operations.py
python3 ./test.py -f functions/function_percentile.py
python3 ./test.py -f functions/function_spread.py
python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
python3 ./test.py -f functions/function_twa.py

@@ -0,0 +1,79 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.conn = conn

    def run(self):
        sqlstr = "select * from t0"
        topic = "test"
        now = int(time.time() * 1000)
        tdSql.prepare()

        tdLog.info("create a table and insert 10 rows.")
        tdSql.execute("create table t0(ts timestamp, a int, b int);")
        for i in range(0, 10):
            tdSql.execute("insert into t0 values (%d, %d, %d);" % (now + i, i, i))

        tdLog.info("consumption 01.")
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(10)

        tdLog.info("consumption 02: no new rows inserted")
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 03: after one new rows inserted")
        tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 04: keep progress and continue previous subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 05: remove progress and continue previous subscription")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(11)

        tdLog.info("consumption 06: keep progress and restart the subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(11)

        tdSub.close(True)

    def stop(self):
        tdSub.close(False)
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

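For readers new to the subscription API this test exercises, the same consume loop can be driven outside the harness. The following is a minimal sketch, assuming the taos Python connector is installed, a local taosd is running, and a database "db" with a populated table t0 already exists; the subscribe(restart, topic, sql, interval), consume() and close(keep_progress) calls mirror the ones the test issues through TDSub.

# Hypothetical standalone sketch of the subscription flow shown above.
# Assumes: taos Python connector installed, taosd on localhost, and an
# existing table db.t0(ts timestamp, a int, b int) with some rows.
import taos

conn = taos.connect(host="127.0.0.1", config="/etc/taos")   # config path is an assumption
sub = conn.subscribe(True, "test", "select * from db.t0", 0)  # restart=True starts fresh

rows = sub.consume()            # first consume returns every existing row
print("got %d rows" % len(rows))

rows = sub.consume()            # nothing new was inserted, so this returns 0 rows
print("got %d rows" % len(rows))

sub.close(True)                 # True keeps the progress for a later restart
conn.close()
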
@@ -0,0 +1,114 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.conn = conn

    def run(self):
        sqlstr = "select * from meters"
        topic = "test"
        now = int(time.time() * 1000)
        tdSql.prepare()

        tdLog.info("create a super table and 10 sub-tables, then insert 5 rows into each sub-table.")
        tdSql.execute("create table meters(ts timestamp, a int, b int) tags(area int, loc binary(20));")
        for i in range(0, 10):
            for j in range(0, 5):
                tdSql.execute("insert into t%d using meters tags(%d, 'area%d') values (%d, %d, %d);" % (i, i, i, now + j, j, j))

        tdLog.info("consumption 01.")
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(50)

        tdLog.info("consumption 02: no new rows inserted")
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 03: after one new rows inserted")
        tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 04: keep progress and continue previous subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(0)

        tdLog.info("consumption 05: remove progress and continue previous subscription")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(51)

        tdLog.info("consumption 06: keep progress and restart the subscription")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
        tdSub.consume()
        tdSub.checkRows(51)

        tdLog.info("consumption 07: insert one row to two table then remove one table")
        tdSql.execute("insert into t0 values (%d, 11, 11);" % (now + 11))
        tdSql.execute("insert into t1 values (%d, 11, 11);" % (now + 11))
        tdSql.execute("drop table t0")
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 08: check timestamp criteria")
        tdSub.close(False)
        tdSub.init(self.conn.subscribe(True, topic, sqlstr + " where ts > %d" % now, 0))
        tdSub.consume()
        tdSub.checkRows(37)

        tdLog.info("consumption 09: insert large timestamp to t2 then insert smaller timestamp to t1")
        tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 100))
        tdSub.consume()
        tdSub.checkRows(1)
        tdSql.execute("insert into t1 values (%d, 12, 12);" % (now + 12))
        tdSub.consume()
        tdSub.checkRows(1)

        tdLog.info("consumption 10: field criteria")
        tdSub.close(True)
        tdSub.init(self.conn.subscribe(False, topic, sqlstr + " where a > 100", 0))
        tdSql.execute("insert into t2 values (%d, 101, 100);" % (now + 101))
        tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 102))
        tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 103))
        tdSub.consume()
        tdSub.checkRows(2)

        tdLog.info("consumption 11: two vnodes")
        tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 104))
        tdSql.execute("insert into t9 values (%d, 102, 100);" % (now + 104))
        tdSub.consume()
        tdSub.checkRows(2)

    def stop(self):
        tdSub.close(False)
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -96,7 +96,7 @@ if __name__ == "__main__":
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                killCmd = "kill -9 %s" % processID
                killCmd = "kill -TERM %s" % processID
                os.system(killCmd)
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)

@@ -0,0 +1,23 @@
EXEC_DIR=`dirname "$0"`
if [[ $EXEC_DIR != "." ]]
then
  echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
  exit -1
fi
CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
  TAOS_DIR=$CURR_DIR/../../..
else
  TAOS_DIR=$CURR_DIR/../..
fi
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR

if [[ "$1" == *"test.py"* ]]; then
  python3 ./test.py $@
else
  python3 $1 $@
fi

@@ -0,0 +1,131 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/

# -*- coding: utf-8 -*-
import sys
import getopt
import subprocess
from distutils.log import warn as printf

from util.log import *
from util.dnodes import *
from util.cases import *
from util.sql import *

import taos


if __name__ == "__main__":
    fileName = "all"
    deployPath = ""
    testCluster = False
    valgrind = 0
    logSql = True
    stop = 0
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'l:sgh', [
        'logSql', 'stop', 'valgrind', 'help'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
                'A collection of test cases written using Python')
            tdLog.printNoPrefix('-l <True:False> logSql Flag')
            tdLog.printNoPrefix('-s stop All dnodes')
            tdLog.printNoPrefix('-g valgrind Test Flag')
            sys.exit(0)

        if key in ['-l', '--logSql']:
            if (value.upper() == "TRUE"):
                logSql = True
            elif (value.upper() == "FALSE"):
                logSql = False
            else:
                tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
                sys.exit(0)

        if key in ['-g', '--valgrind']:
            valgrind = 1

        if key in ['-s', '--stop']:
            stop = 1

    if (stop != 0):
        if (valgrind == 0):
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled

        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True)

        while(processID):
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

        for port in range(6030, 6041):
            usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                killCmd = "kill -TERM %s" % processID
                os.system(killCmd)
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
        if valgrind:
            time.sleep(2)

        tdLog.info('stop All dnodes')
        sys.exit(0)

    tdDnodes.init(deployPath)
    tdDnodes.setTestCluster(testCluster)
    tdDnodes.setValgrind(valgrind)

    tdDnodes.stopAll()
    tdDnodes.addSimExtraCfg("minTablesPerVnode", "100")
    tdDnodes.deploy(1)
    tdDnodes.start(1)

    host = '127.0.0.1'

    tdLog.info("Procedures for tdengine deployed in %s" % (host))

    tdCases.logSql(logSql)

    conn = taos.connect(
        host,
        config=tdDnodes.getSimCfgPath())

    tdSql.init(conn.cursor(), True)

    tdSql.execute("DROP DATABASE IF EXISTS db")
    tdSql.execute("CREATE DATABASE IF NOT EXISTS db")
    tdSql.execute("USE db")

    for i in range(0, 100):
        tdSql.execute(
            "CREATE TABLE IF NOT EXISTS tb%d (ts TIMESTAMP, temperature INT, humidity FLOAT)" % i)

    for i in range(1, 6):
        tdSql.execute("INSERT INTO tb99 values (now + %da, %d, %f)" % (i, i, i * 1.0))

    tdSql.execute("DROP TABLE tb99")
    tdSql.execute(
        "CREATE TABLE IF NOT EXISTS tb99 (ts TIMESTAMP, temperature INT, humidity FLOAT)")
    tdSql.query("SELECT * FROM tb99")
    tdSql.checkRows(0)

    conn.close()

@@ -0,0 +1,502 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import os.path
import subprocess
from util.log import *


class TDSimClient:
    def __init__(self):
        self.testCluster = False

        self.cfgDict = {
            "numOfLogLines": "100000000",
            "numOfThreadsPerCore": "2.0",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "minTablesPerVnode": "4",
            "maxTablesPerVnode": "1000",
            "tableIncStepPerVnode": "10000",
            "maxVgroupsPerDb": "1000",
            "sdbDebugFlag": "143",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "udebugFlag": "135",
            "jnidebugFlag": "135",
            "qdebugFlag": "135",
        }
    def init(self, path):
        self.__init__()
        self.path = path

    def getLogDir(self):
        self.logDir = "%s/sim/psim/log" % (self.path)
        return self.logDir

    def getCfgDir(self):
        self.cfgDir = "%s/sim/psim/cfg" % (self.path)
        return self.cfgDir

    def setTestCluster(self, value):
        self.testCluster = value

    def addExtraCfg(self, option, value):
        self.cfgDict.update({option: value})

    def cfg(self, option, value):
        cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self):
        self.logDir = "%s/sim/psim/log" % (self.path)
        self.cfgDir = "%s/sim/psim/cfg" % (self.path)
        self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)

        cmd = "rm -rf " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "rm -rf " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "touch " + self.cfgPath
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
        self.cfg("logDir", self.logDir)

        for key, value in self.cfgDict.items():
            self.cfg(key, value)

        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))


class TDDnode:
    def __init__(self, index):
        self.index = index
        self.running = 0
        self.deployed = 0
        self.testCluster = False
        self.valgrind = 0

    def init(self, path):
        self.path = path

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def getDataSize(self):
        totalSize = 0

        if (self.deployed == 1):
            for dirpath, dirnames, filenames in os.walk(self.dataDir):
                for f in filenames:
                    fp = os.path.join(dirpath, f)

                    if not os.path.islink(fp):
                        totalSize = totalSize + os.path.getsize(fp)

        return totalSize

    def deploy(self):
        self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
        self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
        self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
        self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
            self.path, self.index)

        cmd = "rm -rf " + self.dataDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "rm -rf " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "rm -rf " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.dataDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "mkdir -p " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        cmd = "touch " + self.cfgPath
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        if self.testCluster:
            self.startIP()

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
            self.cfg("publicIp", "192.168.0.%d" % (self.index))
            self.cfg("internalIp", "192.168.0.%d" % (self.index))
            self.cfg("privateIp", "192.168.0.%d" % (self.index))
        self.cfg("dataDir", self.dataDir)
        self.cfg("logDir", self.logDir)
        self.cfg("numOfLogLines", "100000000")
        self.cfg("mnodeEqualVnodeNum", "0")
        self.cfg("walLevel", "2")
        self.cfg("fsync", "1000")
        self.cfg("statusInterval", "1")
        self.cfg("numOfMnodes", "3")
        self.cfg("numOfThreadsPerCore", "2.0")
        self.cfg("monitor", "0")
        self.cfg("maxVnodeConnections", "30000")
        self.cfg("maxMgmtConnections", "30000")
        self.cfg("maxMeterConnections", "30000")
        self.cfg("maxShellConns", "30000")
        self.cfg("locale", "en_US.UTF-8")
        self.cfg("charset", "UTF-8")
        self.cfg("asyncLog", "0")
        self.cfg("anyIp", "0")
        self.cfg("dDebugFlag", "135")
        self.cfg("mDebugFlag", "135")
        self.cfg("sdbDebugFlag", "135")
        self.cfg("rpcDebugFlag", "135")
        self.cfg("tmrDebugFlag", "131")
        self.cfg("cDebugFlag", "135")
        self.cfg("httpDebugFlag", "135")
        self.cfg("monitorDebugFlag", "135")
        self.cfg("udebugFlag", "135")
        self.cfg("jnidebugFlag", "135")
        self.cfg("qdebugFlag", "135")
        self.deployed = 1
        tdLog.debug(
            "dnode:%d is deployed and configured by %s" %
            (self.index, self.cfgPath))

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root)-len("/build/bin")]
                    break
        return buildPath

    def start(self):
        buildPath = self.getBuildPath()

        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)

        binPath = buildPath + "/build/bin/taosd"

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))

        if self.valgrind == 0:
            cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
                binPath, self.cfgDir)
        else:
            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"

            cmd = "nohup %s %s -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)

        print(cmd)

        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))

        tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
        time.sleep(5)

    def stop(self):
        if self.valgrind == 0:
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        if self.running != 0:
            psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

            while(processID):
                killCmd = "kill -INT %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8")
            for port in range(6030, 6041):
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
            if self.valgrind:
                time.sleep(2)

            self.running = 0
            tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))

    def forcestop(self):
        if self.valgrind == 0:
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        if self.running != 0:
            psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

            while(processID):
                killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8")
            for port in range(6030, 6041):
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
            if self.valgrind:
                time.sleep(2)

            self.running = 0
            tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))

    def startIP(self):
        cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def stopIP(self):
        cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
            self.index, self.index)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def cfg(self, option, value):
        cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def getDnodeRootDir(self, index):
        dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
        return dnodeRootDir

    def getDnodesRootDir(self):
        dnodesRootDir = "%s/sim/psim" % (self.path)
        return dnodesRootDir


class TDDnodes:
    def __init__(self):
        self.dnodes = []
        self.dnodes.append(TDDnode(1))
        self.dnodes.append(TDDnode(2))
        self.dnodes.append(TDDnode(3))
        self.dnodes.append(TDDnode(4))
        self.dnodes.append(TDDnode(5))
        self.dnodes.append(TDDnode(6))
        self.dnodes.append(TDDnode(7))
        self.dnodes.append(TDDnode(8))
        self.dnodes.append(TDDnode(9))
        self.dnodes.append(TDDnode(10))
        self.simDeployed = False

    def init(self, path):
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        binPath = os.path.dirname(os.path.realpath(__file__))
        binPath = binPath + "/../../../debug/"
        tdLog.debug("binPath %s" % (binPath))
        binPath = os.path.realpath(binPath)
        tdLog.debug("binPath real path %s" % (binPath))

        # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
        # tdLog.debug(cmd)
        # os.system(cmd)

        # cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        # tdLog.debug("execute %s" % (cmd))

        # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        # tdLog.debug("execute %s" % (cmd))

        if path == "":
            # self.path = os.path.expanduser('~')
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        for i in range(len(self.dnodes)):
            self.dnodes[i].init(self.path)

        self.sim = TDSimClient()
        self.sim.init(self.path)

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.sim.setTestCluster(self.testCluster)

        if (self.simDeployed == False):
            self.sim.deploy()
            self.simDeployed = True

        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()

    def cfg(self, index, option, value):
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()

    def stop(self, index):
        self.check(index)
        self.dnodes[index - 1].stop()

    def getDataSize(self, index):
        self.check(index)
        return self.dnodes[index - 1].getDataSize()

    def forcestop(self, index):
        self.check(index)
        self.dnodes[index - 1].forcestop()

    def startIP(self, index):
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        if index < 1 or index > 10:
            tdLog.exit("index:%d should on a scale of [1, 10]" % (index))

    def stopAll(self):
        tdLog.info("stop all dnodes")
        for i in range(len(self.dnodes)):
            self.dnodes[i].stop()

        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        if processID:
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

        # if os.system(cmd) != 0 :
        #     tdLog.exit(cmd)

    def getDnodesRootDir(self):
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getSimCfgPath(self):
        return self.sim.getCfgDir()

    def getSimLogPath(self):
        return self.sim.getLogDir()

    def addSimExtraCfg(self, option, value):
        self.sim.addExtraCfg(option, value)


tdDnodes = TDDnodes()

@@ -349,7 +349,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -358,7 +358,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -465,7 +465,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -474,7 +474,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -349,7 +349,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -358,7 +358,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -465,7 +465,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -474,7 +474,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -351,7 +351,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -360,7 +360,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -467,7 +467,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

@@ -476,7 +476,7 @@ class TDDnodes:
        psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(

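The hunks above repeatedly replace kill -KILL with kill -TERM in the harness cleanup: TERM lets taosd shut down in an orderly way, while KILL gives it no chance to release its ports or flush state. A rough, hedged sketch of the escalate-only-if-needed pattern (an illustration, not code from the repository):

# Hedged illustration: TERM first, KILL only as a last resort.
# The process name "taosd" matches the scripts above; the helper itself is hypothetical.
import os
import subprocess
import time

def stop_process(name, retries=10):
    ps_cmd = "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % name
    pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8").split()
    for attempt in range(retries):
        if not pids:
            return True
        # escalate to SIGKILL only on the final attempt
        signal = "-TERM" if attempt < retries - 1 else "-KILL"
        for pid in pids:
            os.system("kill %s %s > /dev/null 2>&1" % (signal, pid))
        time.sleep(1)
        pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8").split()
    return not pids

if __name__ == "__main__":
    stop_process("taosd")
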
@@ -124,6 +124,11 @@ class TDSql:
    def checkData(self, row, col, data):
        self.checkRowCol(row, col)
        if self.queryResult[row][col] != data:
            if isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                           (self.sql, row, col, self.queryResult[row][col], data))
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)

@@ -137,6 +142,9 @@ class TDSql:
        elif isinstance(data, datetime.date):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (self.sql, row, col, self.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                       (self.sql, row, col, self.queryResult[row][col], data))
        else:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
                       (self.sql, row, col, self.queryResult[row][col], data))

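The new branch in checkData accepts a float result when it is within 1e-6 of the expected value instead of requiring exact equality. A quick worked example of the comparison it now performs:

# Tiny worked example of the tolerance check added to checkData above.
expected = 0.1 + 0.2          # 0.30000000000000004 under binary floating point
returned = 0.3
print(returned == expected)                  # False: exact equality is too strict
print(abs(returned - expected) <= 0.000001)  # True: the 1e-6 tolerance accepts it
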
@@ -0,0 +1,43 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import time
import datetime
from util.log import *

class TDSub:
    def __init__(self):
        self.consumedRows = 0
        self.consumedCols = 0

    def init(self, sub):
        self.sub = sub

    def close(self, keepProgress):
        self.sub.close(keepProgress)

    def consume(self):
        self.data = self.sub.consume()
        self.consumedRows = len(self.data)
        self.consumedCols = len(self.sub.fields)
        return self.consumedRows

    def checkRows(self, expectRows):
        if self.consumedRows != expectRows:
            tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
        tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))


tdSub = TDSub()

@@ -135,7 +135,6 @@ run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
#unsupport run general/parser/stream_on_sys.sim
run general/parser/stream.sim
#unsupport run general/parser/repeatStream.sim
run general/stable/disk.sim
run general/stable/dnode3.sim

@@ -212,12 +211,9 @@ run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim
run general/stream/restart_stream.sim
run general/stream/stream_1.sim
run general/stream/stream_2.sim
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
run general/stream/metrics_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim

@@ -0,0 +1,71 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/exec.sh -n dnode1 -s start

sleep 3000
sql connect

print ======== step1
sql alter dnode 1 resetlog
sql alter dnode 1 monitor 1

sleep 5000
sql select * from log.dn
if $rows <= 0 then
  return -1
endi

print ======== step2

sql alter dnode 1 resetquerycache
sql alter dnode 1 debugFlag 135
sql alter dnode 1 debugFlag 131
sql alter dnode 1 monitor 0
sql alter dnode 1 debugFlag 135
sql alter dnode 1 monitorDebugFlag 135
sql alter dnode 1 vDebugFlag 135
sql alter dnode 1 mDebugFlag 135
sql alter dnode 1 cDebugFlag 135
sql alter dnode 1 httpDebugFlag 135
sql alter dnode 1 qDebugflag 135
sql alter dnode 1 sdbDebugFlag 135
sql alter dnode 1 uDebugFlag 135
sql alter dnode 1 tsdbDebugFlag 135
sql alter dnode 1 sDebugflag 135
sql alter dnode 1 rpcDebugFlag 135
sql alter dnode 1 dDebugFlag 135
sql alter dnode 1 mqttDebugFlag 135
sql alter dnode 1 wDebugFlag 135
sql alter dnode 1 tmrDebugFlag 135
sql_error alter dnode 2 wDebugFlag 135
sql_error alter dnode 2 tmrDebugFlag 135

print ======== step3
sql_error alter $hostname1 debugFlag 135
sql_error alter $hostname1 monitorDebugFlag 135
sql_error alter $hostname1 vDebugFlag 135
sql_error alter $hostname1 mDebugFlag 135
sql_error alter dnode $hostname2 debugFlag 135
sql_error alter dnode $hostname2 monitorDebugFlag 135
sql_error alter dnode $hostname2 vDebugFlag 135
sql_error alter dnode $hostname2 mDebugFlag 135
sql alter dnode $hostname1 debugFlag 135
sql alter dnode $hostname1 monitorDebugFlag 135
sql alter dnode $hostname1 vDebugFlag 135
sql alter dnode $hostname1 tmrDebugFlag 131

print ======== step4
sql_error sql alter dnode 1 balance 0
sql_error sql alter dnode 1 balance vnode:1-dnode:1
sql_error sql alter dnode 1 balance "vnode:1"
sql_error sql alter dnode 1 balance "vnode:1-dnode:1"
sql_error sql alter dnode 1 balance "dnode:1-vnode:1"
sql_error sql alter dnode 1 balance "dnode:1-"
sql_error sql alter dnode 1 balance "vnode:2-dnod"
sql alter dnode 1 balance "vnode:2-dnode:1" -x step4
step4:

print ======= over
system sh/exec.sh -n dnode1 -s stop -x SIGINT

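The new sim case exercises the "alter dnode" options end to end. The same statements can also be issued from the Python framework through the connector cursor; the following is a small hedged sketch, where the host and config path are placeholders for a locally deployed dnode rather than values taken from the repository.

# Hedged sketch: driving the same "alter dnode" statements from Python.
import taos

conn = taos.connect(host="127.0.0.1", config="/etc/taos")  # placeholder deployment
cursor = conn.cursor()

for flag in ["debugFlag", "monitorDebugFlag", "vDebugFlag", "mDebugFlag"]:
    cursor.execute("alter dnode 1 %s 135" % flag)   # accepted for an existing dnode

try:
    cursor.execute("alter dnode 2 wDebugFlag 135")  # expected to fail: dnode 2 does not exist
except Exception as err:
    print("rejected as expected:", err)

cursor.close()
conn.close()
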
@@ -28,7 +28,7 @@ sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), a
sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) sliding(2s)
sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) sliding(2s)
sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sliding(2s)
sleep 20000
sleep 120000
sql select * from cpustrm
if $rows <= 0 then
  return -1

@@ -3,6 +3,7 @@ cd ../../../debug; make

./test.sh -f general/alter/cached_schema_after_alter.sim
./test.sh -f general/alter/count.sim
./test.sh -f general/alter/dnode.sim
./test.sh -f general/alter/import.sim
./test.sh -f general/alter/insert1.sim
./test.sh -f general/alter/insert2.sim

@@ -248,6 +249,7 @@ cd ../../../debug; make
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim

./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim

@@ -308,13 +310,10 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim

# stream still has bugs
#./test.sh -f general/parser/stream_on_sys.sim
#./test.sh -f general/parser/repeatStream.sim

#./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
#./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim

@@ -326,13 +325,12 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
#./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
# lower the priority while file corruption
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim

@@ -125,7 +125,7 @@ echo "mqttDebugFlag 135" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "udebugFlag 143" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG
echo "monitor 0" >> $TAOS_CFG

@@ -0,0 +1,115 @@
#!/bin/bash

# if [ $# != 4 || $# != 5 ]; then
#   echo "argument list need input : "
#   echo "  -n nodeName"
#   echo "  -s start/stop"
#   echo "  -c clear"
#   exit 1
# fi

NODE_NAME=
EXEC_OPTON=
CLEAR_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
  case $arg in
    n)
      NODE_NAME=$OPTARG
      ;;
    s)
      EXEC_OPTON=$OPTARG
      ;;
    c)
      CLEAR_OPTION="clear"
      ;;
    t)
      SHELL_OPTION="true"
      ;;
    u)
      USERS=$OPTARG
      ;;
    x)
      SIGNAL=$OPTARG
      ;;
    ?)
      echo "unkown argument"
      ;;
  esac
done

SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`

IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
  cd ../../..
else
  cd ../../
fi

TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`

if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi

BUILD_DIR=$TAOS_DIR/$BIN_DIR/build

SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb

TAOS_CFG=$NODE_DIR/cfg/taos.cfg

echo ------------ $EXEC_OPTON $NODE_NAME

TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
  EXE_DIR=/usr/local/bin/taos
fi

if [ "$CLEAR_OPTION" = "clear" ]; then
  echo rm -rf $MGMT_DIR $TSDB_DIR
  rm -rf $TSDB_DIR
  rm -rf $MGMT_DIR
fi

if [ "$EXEC_OPTON" = "start" ]; then
  echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR

  if [ "$SHELL_OPTION" = "true" ]; then
    TT=`date +%s`
    mkdir ${LOG_DIR}/${TT}
    nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  else
    nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  fi

else
  #relative path
  RCFG_DIR=sim/$NODE_NAME/cfg
  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    if [ "$SIGNAL" = "SIGKILL" ]; then
      echo try to kill by signal SIGKILL
      kill -9 $PID
    else
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    fi
    sleep 1
    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  done
fi

@@ -88,7 +88,9 @@ if [ "$EXEC_OPTON" = "start" ]; then
  echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR

  if [ "$SHELL_OPTION" = "true" ]; then
    nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
    TT=`date +%s`
    mkdir ${LOG_DIR}/${TT}
    nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  else
    nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 0 > /dev/null 2>&1 &
  fi

@@ -99,12 +101,12 @@ else
  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    if [ "$SIGNAL" = "SIGINT" ]; then
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    else
    if [ "$SIGNAL" = "SIGKILL" ]; then
      echo try to kill by signal SIGKILL
      kill -9 $PID
    else
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    fi
    sleep 1
    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`

@@ -88,9 +88,12 @@ if [ "$EXEC_OPTON" = "start" ]; then
  echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR

  if [ "$SHELL_OPTION" = "true" ]; then
    nohup valgrind --log-file=${LOG_DIR}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
    TT=`date +%s`
    mkdir ${LOG_DIR}/${TT}
    nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
  else
    nohup $EXE_DIR/taosd -c $CFG_DIR --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 &
    nohup $EXE_DIR/taosd -c $CFG_DIR --alloc-random-fail \
      --random-file-fail-factor 5 > /dev/null 2>&1 &
  fi

else

@@ -99,12 +102,12 @@ else
  PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    if [ "$SIGNAL" = "SIGINT" ]; then
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    else
    if [ "$SIGNAL" = "SIGKILL" ]; then
      echo try to kill by signal SIGKILL
      kill -9 $PID
    else
      echo try to kill by signal SIGINT
      kill -SIGINT $PID
    fi
    sleep 1
    PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`

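The valgrind branches above now log into a per-run directory named after the epoch timestamp, so successive starts no longer overwrite each other's valgrind.log. A rough Python equivalent of that launch step follows; the binary location and sim paths are assumptions, not values taken from the scripts, and the valgrind flags are copied from the command lines above.

# Hedged sketch of the per-run valgrind launch used by the exec scripts above.
import os
import subprocess
import time

EXE = "/usr/local/bin/taosd"                        # assumption: taosd location
CFG_DIR = os.path.expanduser("~/sim/dnode1/cfg")    # assumption: sim layout
LOG_DIR = os.path.expanduser("~/sim/dnode1/log")

run_dir = os.path.join(LOG_DIR, str(int(time.time())))  # one log directory per run
os.makedirs(run_dir, exist_ok=True)

cmd = ("valgrind --log-file=%s/valgrind.log --tool=memcheck --leak-check=full "
       "--show-reachable=no --track-origins=yes --show-leak-kinds=all -v "
       "--workaround-gcc296-bugs=yes %s -c %s" % (run_dir, EXE, CFG_DIR))
subprocess.Popen(cmd, shell=True,
                 stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
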