other: merge 3.0.

Commit: e3d0bc6bf7
@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG a11131c
GIT_TAG 8c3d57d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 16eb34f
GIT_TAG 0fb640b
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@ -217,7 +217,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
The parameters listed in this section apply to all function modes.

- **filetype**: The function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file.
- **cfgdir**: Specify the directory of the TDengine cluster configuration file. The default path is /etc/taos.
- **cfgdir**: Specify the directory of the TDengine client configuration file. The default path is /etc/taos.

- **host**: Specify the FQDN of the TDengine server to connect to. The default value is `localhost`.

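For orientation, these options are set in the JSON file that is passed to taosBenchmark on the command line. A minimal invocation sketch follows; the file name `insert.json` is only an illustrative placeholder:

```bash
# Run taosBenchmark with a JSON configuration file whose "filetype",
# "cfgdir" and "host" fields are the parameters described above.
taosBenchmark -f insert.json
```
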
@ -735,19 +735,17 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| 44 | numOfVnodeQueryThreads | No | Yes |
| 45 | numOfVnodeStreamThreads | No | Yes |
| 46 | numOfVnodeFetchThreads | No | Yes |
| 47 | numOfVnodeWriteThreads | No | Yes |
| 48 | numOfVnodeSyncThreads | No | Yes |
| 49 | numOfVnodeRsmaThreads | No | Yes |
| 50 | numOfQnodeQueryThreads | No | Yes |
| 51 | numOfQnodeFetchThreads | No | Yes |
| 52 | numOfSnodeSharedThreads | No | Yes |
| 53 | numOfSnodeUniqueThreads | No | Yes |
| 54 | rpcQueueMemoryAllowed | No | Yes |
| 55 | logDir | Yes | Yes |
| 56 | minimalLogDirGB | Yes | Yes |
| 57 | numOfLogLines | Yes | Yes |
| 58 | asyncLog | Yes | Yes |
| 59 | logKeepDays | Yes | Yes |
| 47 | numOfVnodeRsmaThreads | No | Yes |
| 48 | numOfQnodeQueryThreads | No | Yes |
| 49 | numOfQnodeFetchThreads | No | Yes |
| 50 | numOfSnodeSharedThreads | No | Yes |
| 51 | numOfSnodeUniqueThreads | No | Yes |
| 52 | rpcQueueMemoryAllowed | No | Yes |
| 53 | logDir | Yes | Yes |
| 54 | minimalLogDirGB | Yes | Yes |
| 55 | numOfLogLines | Yes | Yes |
| 56 | asyncLog | Yes | Yes |
| 57 | logKeepDays | Yes | Yes |
| 60 | debugFlag | Yes | Yes |
| 61 | tmrDebugFlag | Yes | Yes |
| 62 | uDebugFlag | Yes | Yes |

@ -9,9 +9,10 @@ namespace Examples
|
|||
{
|
||||
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
|
||||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
|
||||
throw new Exception("get WS connection failed");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -13,7 +13,7 @@ namespace Examples
|
|||
// Assert whether the connection is valid
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
|
||||
throw new Exception("get WS connection failed");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -13,7 +13,7 @@ namespace Examples
|
|||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
|
||||
throw new Exception("get WS connection failed");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -21,7 +21,7 @@ namespace Examples
|
|||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
|
||||
throw new Exception($"get WS connection failed");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -190,3 +190,16 @@ DROP DNODE dnodeId;
The dnodeID is assigned automatically by the cluster and must not be specified manually. It is generated in increasing order and is never reused.

:::

## FAQ

1. After adding a new node with CREATE DNODE while building a cluster, why does the new node always show as offline?

   1) First, check whether the taosd service on the new node has started normally.

   2) If it has started, check whether the network to the new node is reachable; you can verify this with `ping fqdn`.

   3) If the first two steps are fine, check whether the new node is running as an independent cluster: connect to it with `taos -h fqdn` and run `show dnodes;`.
   If the list shown differs from the one on your primary node, this node has formed a cluster of its own. To fix it, stop the service on the new node, clear all files under the dataDir directory configured in its taos.cfg, and then restart the service on the new node.

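For reference, the three checks in the answer above map onto a few shell commands. This is only a sketch; `new-dnode-fqdn` is a placeholder for the new node's FQDN, and the service is assumed to be managed by systemd:

```bash
# 1) Verify that the taosd service is running on the new node (run there).
systemctl status taosd

# 2) Verify that the new node is reachable over the network.
ping -c 3 new-dnode-fqdn

# 3) Check whether the new node has formed a cluster of its own:
#    connect to it and list the dnodes it knows about.
taos -h new-dnode-fqdn -s "show dnodes;"
```
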
@ -217,7 +217,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
The parameters listed in this section apply to all function modes.

- **filetype**: The function to be tested, with optional values `insert`, `query` and `subscribe`, corresponding to the insert, query, and subscribe functions respectively. Only one of them can be specified in each configuration file.
- **cfgdir**: The directory of the TDengine cluster configuration file. The default path is /etc/taos.
- **cfgdir**: The directory of the TDengine client configuration file. The default path is /etc/taos.

- **host**: The FQDN of the TDengine server to connect to. The default value is localhost.

@ -711,19 +711,17 @@ The valid value of charset is UTF-8.
| 44 | numOfVnodeQueryThreads | No | Yes | |
| 45 | numOfVnodeStreamThreads | No | Yes | |
| 46 | numOfVnodeFetchThreads | No | Yes | |
| 47 | numOfVnodeWriteThreads | No | Yes | |
| 48 | numOfVnodeSyncThreads | No | Yes | |
| 49 | numOfVnodeRsmaThreads | No | Yes | |
| 50 | numOfQnodeQueryThreads | No | Yes | |
| 51 | numOfQnodeFetchThreads | No | Yes | |
| 52 | numOfSnodeSharedThreads | No | Yes | |
| 53 | numOfSnodeUniqueThreads | No | Yes | |
| 54 | rpcQueueMemoryAllowed | No | Yes | |
| 55 | logDir | Yes | Yes | |
| 56 | minimalLogDirGB | Yes | Yes | |
| 57 | numOfLogLines | Yes | Yes | |
| 58 | asyncLog | Yes | Yes | |
| 59 | logKeepDays | Yes | Yes | |
| 47 | numOfVnodeRsmaThreads | No | Yes | |
| 48 | numOfQnodeQueryThreads | No | Yes | |
| 49 | numOfQnodeFetchThreads | No | Yes | |
| 50 | numOfSnodeSharedThreads | No | Yes | |
| 51 | numOfSnodeUniqueThreads | No | Yes | |
| 52 | rpcQueueMemoryAllowed | No | Yes | |
| 53 | logDir | Yes | Yes | |
| 54 | minimalLogDirGB | Yes | Yes | |
| 55 | numOfLogLines | Yes | Yes | |
| 56 | asyncLog | Yes | Yes | |
| 57 | logKeepDays | Yes | Yes | |
| 60 | debugFlag | Yes | Yes | |
| 61 | tmrDebugFlag | Yes | Yes | |
| 62 | uDebugFlag | Yes | Yes | |

@ -55,8 +55,6 @@ extern int32_t tsNumOfMnodeReadThreads;
|
|||
extern int32_t tsNumOfVnodeQueryThreads;
|
||||
extern int32_t tsNumOfVnodeStreamThreads;
|
||||
extern int32_t tsNumOfVnodeFetchThreads;
|
||||
extern int32_t tsNumOfVnodeWriteThreads;
|
||||
extern int32_t tsNumOfVnodeSyncThreads;
|
||||
extern int32_t tsNumOfVnodeRsmaThreads;
|
||||
extern int32_t tsNumOfQnodeQueryThreads;
|
||||
extern int32_t tsNumOfQnodeFetchThreads;
|
||||
|
|
|
@ -216,9 +216,14 @@ typedef struct SEp {
|
|||
uint16_t port;
|
||||
} SEp;
|
||||
|
||||
#define SHOW_REWRITE_MASK() (1 << 0)
|
||||
|
||||
#define TEST_SHOW_REWRITE_MASK(m) (((m) & SHOW_REWRITE_MASK()) != 0)
|
||||
|
||||
typedef struct {
|
||||
int32_t contLen;
|
||||
int32_t vgId;
|
||||
int32_t msgMask;
|
||||
} SMsgHead;
|
||||
|
||||
// Submit message for one table
|
||||
|
|
|
@ -244,16 +244,6 @@ void syslog(int unused, const char *format, ...);
|
|||
#define TD_CHARSET_LEN 64
|
||||
#define TD_TIMEZONE_LEN 96
|
||||
|
||||
#ifdef WINDOWS
|
||||
#define TD_PATH_MAX 260
|
||||
#elif defined(PATH_MAX)
|
||||
#define TD_PATH_MAX PATH_MAX
|
||||
#elif defined(_XOPEN_PATH_MAX)
|
||||
#define TD_PATH_MAX _XOPEN_PATH_MAX
|
||||
#else
|
||||
#define TD_PATH_MAX _POSIX_PATH_MAX
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -42,6 +42,16 @@ extern "C" {
|
|||
#define PATH_MAX 256
|
||||
#endif
|
||||
|
||||
#ifdef WINDOWS
|
||||
#define TD_PATH_MAX _MAX_PATH
|
||||
#elif defined(PATH_MAX)
|
||||
#define TD_PATH_MAX PATH_MAX
|
||||
#elif defined(_XOPEN_PATH_MAX)
|
||||
#define TD_PATH_MAX _XOPEN_PATH_MAX
|
||||
#else
|
||||
#define TD_PATH_MAX _POSIX_PATH_MAX
|
||||
#endif
|
||||
|
||||
typedef struct TdFile *TdFilePtr;
|
||||
|
||||
#define TD_FILE_CREATE 0x0001
|
||||
|
|
|
@ -59,6 +59,47 @@ typedef enum {
|
|||
typedef void (*FItem)(SQueueInfo *pInfo, void *pItem);
|
||||
typedef void (*FItems)(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfItems);
|
||||
|
||||
typedef struct STaosQnode STaosQnode;
|
||||
|
||||
typedef struct STaosQnode {
|
||||
STaosQnode *next;
|
||||
STaosQueue *queue;
|
||||
int64_t timestamp;
|
||||
int32_t size;
|
||||
int8_t itype;
|
||||
int8_t reserved[3];
|
||||
char item[];
|
||||
} STaosQnode;
|
||||
|
||||
typedef struct STaosQueue {
|
||||
STaosQnode *head;
|
||||
STaosQnode *tail;
|
||||
STaosQueue *next; // for queue set
|
||||
STaosQset *qset; // for queue set
|
||||
void *ahandle; // for queue set
|
||||
FItem itemFp;
|
||||
FItems itemsFp;
|
||||
TdThreadMutex mutex;
|
||||
int64_t memOfItems;
|
||||
int32_t numOfItems;
|
||||
int64_t threadId;
|
||||
} STaosQueue;
|
||||
|
||||
typedef struct STaosQset {
|
||||
STaosQueue *head;
|
||||
STaosQueue *current;
|
||||
TdThreadMutex mutex;
|
||||
tsem_t sem;
|
||||
int32_t numOfQueues;
|
||||
int32_t numOfItems;
|
||||
} STaosQset;
|
||||
|
||||
typedef struct STaosQall {
|
||||
STaosQnode *current;
|
||||
STaosQnode *start;
|
||||
int32_t numOfItems;
|
||||
} STaosQall;
|
||||
|
||||
STaosQueue *taosOpenQueue();
|
||||
void taosCloseQueue(STaosQueue *queue);
|
||||
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
|
||||
|
|
|
@ -26,8 +26,9 @@ typedef struct SQWorkerPool SQWorkerPool;
|
|||
typedef struct SWWorkerPool SWWorkerPool;
|
||||
|
||||
typedef struct SQWorker {
|
||||
int32_t id; // worker ID
|
||||
TdThread thread; // thread
|
||||
int32_t id; // worker id
|
||||
int64_t pid; // thread pid
|
||||
TdThread thread; // thread id
|
||||
SQWorkerPool *pool;
|
||||
} SQWorker;
|
||||
|
||||
|
@ -43,9 +44,10 @@ typedef struct SQWorkerPool {
|
|||
|
||||
typedef struct SWWorker {
|
||||
int32_t id; // worker id
|
||||
TdThread thread; // thread
|
||||
int64_t pid; // thread pid
|
||||
TdThread thread; // thread id
|
||||
STaosQall *qall;
|
||||
STaosQset *qset; // queue set
|
||||
STaosQset *qset;
|
||||
SWWorkerPool *pool;
|
||||
} SWWorker;
|
||||
|
||||
|
|
|
@ -67,6 +67,7 @@ fi
|
|||
}
|
||||
|
||||
|
||||
|
||||
function wgetFile {
|
||||
|
||||
file=$1
|
||||
|
@ -75,7 +76,10 @@ if [ ! -f ${file} ];then
|
|||
echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
|
||||
wget https://www.taosdata.com/assets-download/3.0/${file}
|
||||
else
|
||||
echoColor YD "${file} already exists "
|
||||
echoColor YD "${file} already exists and use new file "
|
||||
rm -rf ${file}
|
||||
echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
|
||||
wget https://www.taosdata.com/assets-download/3.0/${file}
|
||||
fi
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,6 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
|
|||
copy %binary_dir%\\test\\cfg\\taosadapter.toml %tagert_dir%\\cfg\\taosadapter.toml > nul
|
||||
)
|
||||
)
|
||||
|
||||
copy %source_dir%\\include\\client\\taos.h %tagert_dir%\\include > nul
|
||||
copy %source_dir%\\include\\util\\taoserror.h %tagert_dir%\\include > nul
|
||||
copy %source_dir%\\include\\libs\\function\\taosudf.h %tagert_dir%\\include > nul
|
||||
|
@ -52,6 +51,7 @@ if exist %binary_dir%\\build\\lib\\taosws.dll.lib (
|
|||
)
|
||||
if exist %binary_dir%\\build\\lib\\taosws.dll (
|
||||
copy %binary_dir%\\build\\lib\\taosws.dll %tagert_dir%\\driver > nul
|
||||
copy %source_dir%\\tools\\taosws-rs\\target\\release\\taosws.h %tagert_dir%\\include > nul
|
||||
)
|
||||
if exist %binary_dir%\\build\\bin\\taosdump.exe (
|
||||
copy %binary_dir%\\build\\bin\\taosdump.exe %tagert_dir% > nul
|
||||
|
@ -67,3 +67,4 @@ if exist C:\\TDengine\\driver\\taosws.dll (
|
|||
copy /y C:\\TDengine\\driver\\taosws.dll C:\\Windows\\System32 > nul
|
||||
)
|
||||
sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
|
||||
sc query "taosadapter" >nul || sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
|
||||
|
|
|
@ -15,10 +15,10 @@
|
|||
#define MyAppExeName "\*.exe"
|
||||
#define MyAppTaosExeName "\taos.bat"
|
||||
#define MyAppTaosdemoExeName "\taosBenchmark.exe"
|
||||
#define MyAppDLLName "\driver\taos.dll"
|
||||
#define MyAppDLLName "\driver\*.dll"
|
||||
;#define MyAppVersion "3.0"
|
||||
;#define MyAppInstallName "TDengine"
|
||||
|
||||
;#define MyAppInstallName "TDengine"
|
||||
[Setup]
|
||||
VersionInfoVersion={#MyAppVersion}
|
||||
AppId={{A0F7A93C-79C4-485D-B2B8-F0D03DF42FAB}
|
||||
|
|
|
@ -50,8 +50,6 @@ int32_t tsNumOfMnodeReadThreads = 1;
|
|||
int32_t tsNumOfVnodeQueryThreads = 4;
|
||||
int32_t tsNumOfVnodeStreamThreads = 2;
|
||||
int32_t tsNumOfVnodeFetchThreads = 4;
|
||||
int32_t tsNumOfVnodeWriteThreads = 2;
|
||||
int32_t tsNumOfVnodeSyncThreads = 2;
|
||||
int32_t tsNumOfVnodeRsmaThreads = 2;
|
||||
int32_t tsNumOfQnodeQueryThreads = 4;
|
||||
int32_t tsNumOfQnodeFetchThreads = 1;
|
||||
|
@ -374,14 +372,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeWriteThreads = tsNumOfCores;
|
||||
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeSyncThreads = tsNumOfCores * 2;
|
||||
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeRsmaThreads = tsNumOfCores;
|
||||
tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
@ -506,22 +496,6 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
|||
pItem->stype = stype;
|
||||
}
|
||||
|
||||
pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
|
||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||
tsNumOfVnodeWriteThreads = numOfCores;
|
||||
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
|
||||
pItem->i32 = tsNumOfVnodeWriteThreads;
|
||||
pItem->stype = stype;
|
||||
}
|
||||
|
||||
pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
|
||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||
tsNumOfVnodeSyncThreads = numOfCores * 2;
|
||||
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
|
||||
pItem->i32 = tsNumOfVnodeSyncThreads;
|
||||
pItem->stype = stype;
|
||||
}
|
||||
|
||||
pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
|
||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||
tsNumOfVnodeRsmaThreads = numOfCores;
|
||||
|
@ -699,8 +673,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
||||
tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32;
|
||||
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
|
||||
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
|
||||
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
|
||||
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
||||
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
|
||||
// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
|
||||
|
@ -786,6 +758,9 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
case 'd': {
|
||||
if (strcasecmp("dDebugFlag", name) == 0) {
|
||||
dDebugFlag = cfgGetItem(pCfg, "dDebugFlag")->i32;
|
||||
} else if (strcasecmp("debugFlag", name) == 0) {
|
||||
int32_t flag = cfgGetItem(pCfg, "debugFlag")->i32;
|
||||
taosSetAllDebugFlag(flag, true);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -943,10 +918,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
} else if (strcasecmp("numOfVnodeFetchThreads", name) == 0) {
|
||||
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
|
||||
*/
|
||||
} else if (strcasecmp("numOfVnodeWriteThreads", name) == 0) {
|
||||
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
|
||||
} else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) {
|
||||
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
|
||||
} else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) {
|
||||
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
|
||||
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
|
||||
|
|
|
@ -140,6 +140,9 @@ int32_t smPutMsgToQueue(SSnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
if (pSnode == NULL) {
|
||||
dError("snode: msg:%p failed to put into vnode queue since %s, type:%s qtype:%d", pMsg, terrstr(),
|
||||
TMSG_INFO(pMsg->msgType), qtype);
|
||||
taosFreeQitem(pMsg);
|
||||
rpcFreeCont(pRpc->pCont);
|
||||
pRpc->pCont = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -33,10 +33,6 @@ typedef struct SVnodeMgmt {
|
|||
SQWorkerPool queryPool;
|
||||
SQWorkerPool streamPool;
|
||||
SWWorkerPool fetchPool;
|
||||
SWWorkerPool syncPool;
|
||||
SWWorkerPool syncCtrlPool;
|
||||
SWWorkerPool writePool;
|
||||
SWWorkerPool applyPool;
|
||||
SSingleWorker mgmtWorker;
|
||||
SHashObj *hash;
|
||||
TdThreadRwlock lock;
|
||||
|
@ -58,10 +54,10 @@ typedef struct {
|
|||
int8_t dropped;
|
||||
char *path;
|
||||
SVnode *pImpl;
|
||||
STaosQueue *pWriteQ;
|
||||
STaosQueue *pSyncQ;
|
||||
STaosQueue *pSyncCtrlQ;
|
||||
STaosQueue *pApplyQ;
|
||||
SMultiWorker pWriteW;
|
||||
SMultiWorker pSyncW;
|
||||
SMultiWorker pSyncCtrlW;
|
||||
SMultiWorker pApplyW;
|
||||
STaosQueue *pQueryQ;
|
||||
STaosQueue *pStreamQ;
|
||||
STaosQueue *pFetchQ;
|
||||
|
|
|
@ -79,29 +79,49 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
|
|||
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
|
||||
vnodePreClose(pVnode->pImpl);
|
||||
|
||||
taosThreadRwlockWrlock(&pMgmt->lock);
|
||||
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
|
||||
taosThreadRwlockUnlock(&pMgmt->lock);
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
|
||||
dTrace("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
|
||||
while (pVnode->refCount > 0) taosMsleep(10);
|
||||
dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);
|
||||
dInfo("vgId:%d, pre close", pVnode->vgId);
|
||||
vnodePreClose(pVnode->pImpl);
|
||||
|
||||
while (!taosQueueEmpty(pVnode->pWriteQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pSyncQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10);
|
||||
dInfo("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
|
||||
while (pVnode->refCount > 0) taosMsleep(10);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode write queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
|
||||
pVnode->pWriteW.queue->threadId);
|
||||
tMultiWorkerCleanup(&pVnode->pWriteW);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode sync queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
|
||||
pVnode->pSyncW.queue->threadId);
|
||||
tMultiWorkerCleanup(&pVnode->pSyncW);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode sync ctrl queue:%p is empty, thread:%08" PRId64, pVnode->vgId,
|
||||
pVnode->pSyncCtrlW.queue, pVnode->pSyncCtrlW.queue->threadId);
|
||||
tMultiWorkerCleanup(&pVnode->pSyncCtrlW);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode apply queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
|
||||
pVnode->pApplyW.queue->threadId);
|
||||
tMultiWorkerCleanup(&pVnode->pApplyW);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ);
|
||||
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode fetch queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
|
||||
pVnode->pFetchQ->threadId);
|
||||
while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);
|
||||
|
||||
dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ);
|
||||
while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);
|
||||
dTrace("vgId:%d, vnode queue is empty", pVnode->vgId);
|
||||
|
||||
dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId);
|
||||
|
||||
vmFreeQueue(pMgmt, pVnode);
|
||||
vnodeClose(pVnode->pImpl);
|
||||
pVnode->pImpl = NULL;
|
||||
dDebug("vgId:%d, vnode is closed", pVnode->vgId);
|
||||
dInfo("vgId:%d, vnode is closed", pVnode->vgId);
|
||||
|
||||
if (pVnode->dropped) {
|
||||
dInfo("vgId:%d, vnode is destroyed, dropped:%d", pVnode->vgId, pVnode->dropped);
|
||||
|
|
|
@ -145,6 +145,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
|
||||
pHead->contLen = ntohl(pHead->contLen);
|
||||
pHead->vgId = ntohl(pHead->vgId);
|
||||
pHead->msgMask = ntohl(pHead->msgMask);
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
|
||||
if (pVnode == NULL) {
|
||||
|
@ -155,7 +156,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
|
||||
switch (qtype) {
|
||||
case QUERY_QUEUE:
|
||||
if ((pMsg->msgType == TDMT_SCH_QUERY) && (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS)) {
|
||||
if ((pMsg->msgType == TDMT_SCH_QUERY) && (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS) && !TEST_SHOW_REWRITE_MASK(pHead->msgMask)) {
|
||||
terrno = TSDB_CODE_GRANT_EXPIRED;
|
||||
code = terrno;
|
||||
dDebug("vgId:%d, msg:%p put into vnode-query queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
||||
|
@ -188,30 +189,20 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
||||
} else {
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pWriteQ, pMsg);
|
||||
#if 0 // tests for batch writes
|
||||
if (pMsg->msgType == TDMT_VND_CREATE_TABLE) {
|
||||
SRpcMsg *pDup = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
|
||||
memcpy(pDup, pMsg, sizeof(SRpcMsg));
|
||||
pDup->pCont = rpcMallocCont(pMsg->contLen);
|
||||
memcpy(pDup->pCont, pMsg->pCont, pMsg->contLen);
|
||||
pDup->info.handle = NULL;
|
||||
taosWriteQitem(pVnode->pWriteQ, pDup);
|
||||
}
|
||||
#endif
|
||||
taosWriteQitem(pVnode->pWriteW.queue, pMsg);
|
||||
}
|
||||
break;
|
||||
case SYNC_QUEUE:
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-sync queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pSyncQ, pMsg);
|
||||
taosWriteQitem(pVnode->pSyncW.queue, pMsg);
|
||||
break;
|
||||
case SYNC_CTRL_QUEUE:
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-sync-ctrl queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pSyncCtrlQ, pMsg);
|
||||
taosWriteQitem(pVnode->pSyncCtrlW.queue, pMsg);
|
||||
break;
|
||||
case APPLY_QUEUE:
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-apply queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pApplyQ, pMsg);
|
||||
taosWriteQitem(pVnode->pApplyW.queue, pMsg);
|
||||
break;
|
||||
default:
|
||||
code = -1;
|
||||
|
@ -276,13 +267,13 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
|
|||
if (pVnode != NULL) {
|
||||
switch (qtype) {
|
||||
case WRITE_QUEUE:
|
||||
size = taosQueueItemSize(pVnode->pWriteQ);
|
||||
size = taosQueueItemSize(pVnode->pWriteW.queue);
|
||||
break;
|
||||
case SYNC_QUEUE:
|
||||
size = taosQueueItemSize(pVnode->pSyncQ);
|
||||
size = taosQueueItemSize(pVnode->pSyncW.queue);
|
||||
break;
|
||||
case APPLY_QUEUE:
|
||||
size = taosQueueItemSize(pVnode->pApplyQ);
|
||||
size = taosQueueItemSize(pVnode->pApplyW.queue);
|
||||
break;
|
||||
case QUERY_QUEUE:
|
||||
size = taosQueueItemSize(pVnode->pQueryQ);
|
||||
|
@ -306,40 +297,44 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
|
|||
}
|
||||
|
||||
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
||||
pVnode->pWriteQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode->pImpl, (FItems)vnodeProposeWriteMsg);
|
||||
pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue);
|
||||
pVnode->pSyncCtrlQ = tWWorkerAllocQueue(&pMgmt->syncCtrlPool, pVnode, (FItems)vmProcessSyncQueue);
|
||||
pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg);
|
||||
SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
|
||||
SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
|
||||
SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-ctrl", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
|
||||
SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
|
||||
(void)tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
|
||||
(void)tMultiWorkerInit(&pVnode->pSyncW, &scfg);
|
||||
(void)tMultiWorkerInit(&pVnode->pSyncCtrlW, &sccfg);
|
||||
(void)tMultiWorkerInit(&pVnode->pApplyW, &acfg);
|
||||
|
||||
pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
|
||||
pVnode->pStreamQ = tQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue);
|
||||
pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);
|
||||
|
||||
if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL ||
|
||||
pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) {
|
||||
if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncCtrlW.queue == NULL ||
|
||||
pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
dDebug("vgId:%d, write-queue:%p is alloced", pVnode->vgId, pVnode->pWriteQ);
|
||||
dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ);
|
||||
dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, pVnode->pApplyQ);
|
||||
dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
|
||||
dDebug("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
|
||||
dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ);
|
||||
dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
|
||||
pVnode->pWriteW.queue->threadId);
|
||||
dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
|
||||
pVnode->pSyncW.queue->threadId);
|
||||
dInfo("vgId:%d, sync-ctrl-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncCtrlW.queue,
|
||||
pVnode->pSyncCtrlW.queue->threadId);
|
||||
dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
|
||||
pVnode->pApplyW.queue->threadId);
|
||||
dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
|
||||
dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
|
||||
pVnode->pFetchQ->threadId);
|
||||
dInfo("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
||||
tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ);
|
||||
tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ);
|
||||
tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
|
||||
tWWorkerFreeQueue(&pMgmt->syncCtrlPool, pVnode->pSyncCtrlQ);
|
||||
tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
|
||||
tQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ);
|
||||
tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
|
||||
pVnode->pWriteQ = NULL;
|
||||
pVnode->pSyncQ = NULL;
|
||||
pVnode->pApplyQ = NULL;
|
||||
pVnode->pQueryQ = NULL;
|
||||
pVnode->pStreamQ = NULL;
|
||||
pVnode->pFetchQ = NULL;
|
||||
|
@ -364,26 +359,6 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
|||
pFPool->max = tsNumOfVnodeFetchThreads;
|
||||
if (tWWorkerInit(pFPool) != 0) return -1;
|
||||
|
||||
SWWorkerPool *pWPool = &pMgmt->writePool;
|
||||
pWPool->name = "vnode-write";
|
||||
pWPool->max = tsNumOfVnodeWriteThreads;
|
||||
if (tWWorkerInit(pWPool) != 0) return -1;
|
||||
|
||||
SWWorkerPool *pAPool = &pMgmt->applyPool;
|
||||
pAPool->name = "vnode-apply";
|
||||
pAPool->max = tsNumOfVnodeWriteThreads;
|
||||
if (tWWorkerInit(pAPool) != 0) return -1;
|
||||
|
||||
SWWorkerPool *pSPool = &pMgmt->syncPool;
|
||||
pSPool->name = "vnode-sync";
|
||||
pSPool->max = tsNumOfVnodeSyncThreads;
|
||||
if (tWWorkerInit(pSPool) != 0) return -1;
|
||||
|
||||
SWWorkerPool *pSCPool = &pMgmt->syncCtrlPool;
|
||||
pSCPool->name = "vnode-sync-ctrl";
|
||||
pSCPool->max = tsNumOfVnodeSyncThreads;
|
||||
if (tWWorkerInit(pSCPool) != 0) return -1;
|
||||
|
||||
SSingleWorkerCfg mgmtCfg = {
|
||||
.min = 1,
|
||||
.max = 1,
|
||||
|
@ -398,10 +373,6 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
|||
}
|
||||
|
||||
void vmStopWorker(SVnodeMgmt *pMgmt) {
|
||||
tWWorkerCleanup(&pMgmt->writePool);
|
||||
tWWorkerCleanup(&pMgmt->applyPool);
|
||||
tWWorkerCleanup(&pMgmt->syncPool);
|
||||
tWWorkerCleanup(&pMgmt->syncCtrlPool);
|
||||
tQWorkerCleanup(&pMgmt->queryPool);
|
||||
tQWorkerCleanup(&pMgmt->streamPool);
|
||||
tWWorkerCleanup(&pMgmt->fetchPool);
|
||||
|
|
|
@ -466,7 +466,7 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
|
|||
|
||||
contLen += sizeof(SMsgHead);
|
||||
|
||||
SMsgHead *pHead = taosMemoryMalloc(contLen);
|
||||
SMsgHead *pHead = taosMemoryCalloc(1, contLen);
|
||||
if (pHead == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
|
@ -519,6 +519,7 @@ static void *mndBuildVDropStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb,
|
|||
|
||||
pHead->contLen = htonl(contLen);
|
||||
pHead->vgId = htonl(pVgroup->vgId);
|
||||
pHead->msgMask = htonl(0);
|
||||
|
||||
void *pBuf = POINTER_SHIFT(pHead, sizeof(SMsgHead));
|
||||
|
||||
|
|
|
@ -282,6 +282,8 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
|
|||
pMgmt->errCode = 0;
|
||||
|
||||
SRpcMsg req = {.msgType = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
|
||||
if (req.contLen <= 0) return -1;
|
||||
|
||||
req.pCont = rpcMallocCont(req.contLen);
|
||||
if (req.pCont == NULL) return -1;
|
||||
memcpy(req.pCont, pRaw, req.contLen);
|
||||
|
|
|
@ -375,7 +375,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
|
|||
if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
|
||||
action.pCont = NULL;
|
||||
} else {
|
||||
if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
|
||||
if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -325,10 +325,10 @@ static void *mndBuildAlterVnodeConfigReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pV
|
|||
static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId,
|
||||
int32_t *pContLen) {
|
||||
SAlterVnodeReplicaReq alterReq = {
|
||||
alterReq.vgId = pVgroup->vgId,
|
||||
alterReq.strict = pDb->cfg.strict,
|
||||
alterReq.replica = pVgroup->replica,
|
||||
alterReq.selfIndex = -1,
|
||||
.vgId = pVgroup->vgId,
|
||||
.strict = pDb->cfg.strict,
|
||||
.replica = pVgroup->replica,
|
||||
.selfIndex = -1,
|
||||
};
|
||||
|
||||
for (int32_t v = 0; v < pVgroup->replica; ++v) {
|
||||
|
|
|
@ -97,6 +97,7 @@ bool vnodeShouldRollback(SVnode* pVnode);
|
|||
// vnodeSync.c
|
||||
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
|
||||
void vnodeSyncStart(SVnode* pVnode);
|
||||
void vnodeSyncPreClose(SVnode* pVnode);
|
||||
void vnodeSyncClose(SVnode* pVnode);
|
||||
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
|
||||
bool vnodeIsLeader(SVnode* pVnode);
|
||||
|
|
|
@ -279,7 +279,6 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
|
|||
|
||||
(*pIter)->pBlockLoadInfo = pBlockLoadInfo;
|
||||
|
||||
// size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
|
||||
if (!pBlockLoadInfo->sttBlockLoaded) {
|
||||
int64_t st = taosGetTimestampUs();
|
||||
pBlockLoadInfo->sttBlockLoaded = true;
|
||||
|
|
|
@ -242,12 +242,7 @@ _err:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void vnodePreClose(SVnode *pVnode) {
|
||||
if (pVnode) {
|
||||
syncLeaderTransfer(pVnode->sync);
|
||||
syncPreStop(pVnode->sync);
|
||||
}
|
||||
}
|
||||
void vnodePreClose(SVnode *pVnode) { vnodeSyncPreClose(pVnode); }
|
||||
|
||||
void vnodeClose(SVnode *pVnode) {
|
||||
if (pVnode) {
|
||||
|
|
|
@ -342,52 +342,26 @@ static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, const SRpcMsg *pMsg, cons
|
|||
TMSG_INFO(pMsg->msgType));
|
||||
}
|
||||
|
||||
#define USE_TSDB_SNAPSHOT
|
||||
|
||||
static int32_t vnodeSnapshotStartRead(const SSyncFSM *pFsm, void *pParam, void **ppReader) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
SSnapshotParam *pSnapshotParam = pParam;
|
||||
int32_t code = vnodeSnapReaderOpen(pVnode, pSnapshotParam->start, pSnapshotParam->end, (SVSnapReader **)ppReader);
|
||||
return code;
|
||||
#else
|
||||
*ppReader = taosMemoryMalloc(32);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotStopRead(const SSyncFSM *pFsm, void *pReader) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
int32_t code = vnodeSnapReaderClose(pReader);
|
||||
return code;
|
||||
#else
|
||||
taosMemoryFree(pReader);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotDoRead(const SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
int32_t code = vnodeSnapRead(pReader, (uint8_t **)ppBuf, len);
|
||||
return code;
|
||||
#else
|
||||
static int32_t times = 0;
|
||||
if (times++ < 5) {
|
||||
*len = 64;
|
||||
*ppBuf = taosMemoryMalloc(*len);
|
||||
snprintf(*ppBuf, *len, "snapshot block %d", times);
|
||||
} else {
|
||||
*len = 0;
|
||||
*ppBuf = NULL;
|
||||
}
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotStartWrite(const SSyncFSM *pFsm, void *pParam, void **ppWriter) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
SSnapshotParam *pSnapshotParam = pParam;
|
||||
|
||||
|
@ -404,14 +378,9 @@ static int32_t vnodeSnapshotStartWrite(const SSyncFSM *pFsm, void *pParam, void
|
|||
|
||||
int32_t code = vnodeSnapWriterOpen(pVnode, pSnapshotParam->start, pSnapshotParam->end, (SVSnapWriter **)ppWriter);
|
||||
return code;
|
||||
#else
|
||||
*ppWriter = taosMemoryMalloc(32);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotStopWrite(const SSyncFSM *pFsm, void *pWriter, bool isApply, SSnapshot *pSnapshot) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
vInfo("vgId:%d, stop write vnode snapshot, apply:%d, index:%" PRId64 " term:%" PRIu64 " config:%" PRId64,
|
||||
pVnode->config.vgId, isApply, pSnapshot->lastApplyIndex, pSnapshot->lastApplyTerm, pSnapshot->lastConfigIndex);
|
||||
|
@ -419,22 +388,14 @@ static int32_t vnodeSnapshotStopWrite(const SSyncFSM *pFsm, void *pWriter, bool
|
|||
int32_t code = vnodeSnapWriterClose(pWriter, !isApply, pSnapshot);
|
||||
vInfo("vgId:%d, apply vnode snapshot finished, code:0x%x", pVnode->config.vgId, code);
|
||||
return code;
|
||||
#else
|
||||
taosMemoryFree(pWriter);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotDoWrite(const SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
vDebug("vgId:%d, continue write vnode snapshot, len:%d", pVnode->config.vgId, len);
|
||||
int32_t code = vnodeSnapWrite(pWriter, pBuf, len);
|
||||
vDebug("vgId:%d, continue write vnode snapshot finished, len:%d", pVnode->config.vgId, len);
|
||||
return code;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void vnodeRestoreFinish(const SSyncFSM *pFsm) {
|
||||
|
@ -461,7 +422,6 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
|
|||
SVnode *pVnode = pFsm->data;
|
||||
vDebug("vgId:%d, become follower", pVnode->config.vgId);
|
||||
|
||||
// clear old leader resource
|
||||
taosThreadMutexLock(&pVnode->lock);
|
||||
if (pVnode->blocked) {
|
||||
pVnode->blocked = false;
|
||||
|
@ -474,15 +434,6 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
|
|||
static void vnodeBecomeLeader(const SSyncFSM *pFsm) {
|
||||
SVnode *pVnode = pFsm->data;
|
||||
vDebug("vgId:%d, become leader", pVnode->config.vgId);
|
||||
|
||||
#if 0
|
||||
taosThreadMutexLock(&pVnode->lock);
|
||||
if (pVnode->blocked) {
|
||||
pVnode->blocked = false;
|
||||
tsem_post(&pVnode->syncSem);
|
||||
}
|
||||
taosThreadMutexUnlock(&pVnode->lock);
|
||||
#endif
|
||||
}
|
||||
|
||||
static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
|
||||
|
@ -543,12 +494,25 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
|
|||
}
|
||||
|
||||
void vnodeSyncStart(SVnode *pVnode) {
|
||||
vDebug("vgId:%d, start sync", pVnode->config.vgId);
|
||||
vInfo("vgId:%d, start sync", pVnode->config.vgId);
|
||||
syncStart(pVnode->sync);
|
||||
}
|
||||
|
||||
void vnodeSyncPreClose(SVnode *pVnode) {
|
||||
vInfo("vgId:%d, pre close sync", pVnode->config.vgId);
|
||||
syncLeaderTransfer(pVnode->sync);
|
||||
syncPreStop(pVnode->sync);
|
||||
taosThreadMutexLock(&pVnode->lock);
|
||||
if (pVnode->blocked) {
|
||||
vInfo("vgId:%d, post block after close sync", pVnode->config.vgId);
|
||||
pVnode->blocked = false;
|
||||
tsem_post(&pVnode->syncSem);
|
||||
}
|
||||
taosThreadMutexUnlock(&pVnode->lock);
|
||||
}
|
||||
|
||||
void vnodeSyncClose(SVnode *pVnode) {
|
||||
vDebug("vgId:%d, close sync", pVnode->config.vgId);
|
||||
vInfo("vgId:%d, close sync", pVnode->config.vgId);
|
||||
syncStop(pVnode->sync);
|
||||
}
|
||||
|
||||
|
|
|
@ -1495,10 +1495,6 @@ void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray
|
|||
while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
|
||||
SColumnInfoData* p = taosArrayGet(pCols, i);
|
||||
SColMatchItem* pmInfo = taosArrayGet(pColMatchInfo, j);
|
||||
/* if (!outputEveryColumn && pmInfo->reserved) {
|
||||
j++;
|
||||
continue;
|
||||
}*/
|
||||
|
||||
if (p->info.colId == pmInfo->colId) {
|
||||
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->dstSlotId);
|
||||
|
|
|
@ -579,22 +579,8 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, SArray* pColMatchInfo,
|
||||
SOperatorInfo* pOperator) {
|
||||
SMultiwayMergeOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, SSDataBlock* p) {
|
||||
|
||||
blockDataCleanup(pDataBlock);
|
||||
int32_t capacity = pDataBlock->info.capacity;
|
||||
|
||||
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
|
||||
if (p == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
blockDataEnsureCapacity(p, capacity);
|
||||
|
||||
_retry:
|
||||
while (1) {
|
||||
STupleHandle* pTupleHandle = NULL;
|
||||
if (pInfo->groupSort) {
|
||||
|
@ -639,22 +625,47 @@ _retry:
|
|||
pInfo->hasGroupId = false;
|
||||
}
|
||||
|
||||
if (p->info.rows > 0) { // todo extract method
|
||||
applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
|
||||
if (p->info.rows == 0) {
|
||||
goto _retry;
|
||||
}
|
||||
|
||||
SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, SArray* pColMatchInfo,
|
||||
SOperatorInfo* pOperator) {
|
||||
SMultiwayMergeOperatorInfo* pInfo = pOperator->info;
|
||||
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
blockDataCleanup(pDataBlock);
|
||||
|
||||
SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
|
||||
if (p == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
blockDataEnsureCapacity(pDataBlock, p->info.rows);
|
||||
int32_t capacity = pOperator->resultInfo.capacity;
|
||||
blockDataEnsureCapacity(p, capacity);
|
||||
|
||||
while (1) {
|
||||
doGetSortedBlockData(pInfo, pHandle, capacity, p);
|
||||
if (p->info.rows == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (p->info.rows > 0) {
|
||||
applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
|
||||
if (p->info.rows > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (p->info.rows > 0) {
|
||||
int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColMatchItem* pmInfo = taosArrayGet(pColMatchInfo, i);
|
||||
// ASSERT(pColMatchInfo-> == COL_MATCH_FROM_SLOT_ID);
|
||||
|
||||
SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId);
|
||||
SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->dstSlotId);
|
||||
colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
|
||||
}
|
||||
|
||||
pInfo->limitInfo.numOfOutputRows += p->info.rows;
|
||||
pDataBlock->info.rows = p->info.rows;
|
||||
pDataBlock->info.groupId = pInfo->groupId;
|
||||
|
|
|
@ -985,7 +985,8 @@ void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInf
|
|||
// current result is done in computing final results.
|
||||
if (pInfo->timeWindowInterpo && isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
|
||||
closeResultRow(pResult);
|
||||
tdListPopHead(pResultRowInfo->openWindow);
|
||||
SListNode *pNode = tdListPopHead(pResultRowInfo->openWindow);
|
||||
taosMemoryFree(pNode);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5451,6 +5451,8 @@ int32_t modeFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
colDataAppendNULL(pCol, currentRow);
|
||||
}
|
||||
|
||||
taosHashCleanup(pInfo->pHash);
|
||||
|
||||
return pResInfo->numOfRes;
|
||||
}
|
||||
|
||||
|
|
|
@ -185,12 +185,12 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
|
|||
code = rsp->code;
|
||||
}
|
||||
}
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) {
|
||||
taosArrayDestroy((SArray*)pJob->execRes.res);
|
||||
pJob->execRes.res = NULL;
|
||||
}
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
}
|
||||
|
||||
tDecoderClear(&coder);
|
||||
|
@ -1047,6 +1047,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
|
|||
|
||||
SSubQueryMsg *pMsg = msg;
|
||||
pMsg->header.vgId = htonl(addr->nodeId);
|
||||
pMsg->header.msgMask = htonl((pTask->plan->showRewrite) ? SHOW_REWRITE_MASK() : 0);
|
||||
pMsg->sId = htobe64(schMgmt.sId);
|
||||
pMsg->queryId = htobe64(pJob->queryId);
|
||||
pMsg->taskId = htobe64(pTask->taskId);
|
||||
|
|
|
@ -49,6 +49,7 @@ extern "C" {
|
|||
// IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex]
|
||||
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
|
||||
//
|
||||
void syncOneReplicaAdvance(SSyncNode* pSyncNode);
|
||||
void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode);
|
||||
bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index);
|
||||
bool syncAgree(SSyncNode* pSyncNode, SyncIndex index);
|
||||
|
|
|
@ -44,6 +44,56 @@
|
|||
// IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex]
|
||||
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
|
||||
//
|
||||
void syncOneReplicaAdvance(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode == NULL) {
|
||||
sError("pSyncNode is NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
|
||||
syncNodeErrorLog(pSyncNode, "not leader, can not advance commit index");
|
||||
return;
|
||||
}
|
||||
|
||||
if (pSyncNode->replicaNum != 1) {
|
||||
syncNodeErrorLog(pSyncNode, "not one replica, can not advance commit index");
|
||||
return;
|
||||
}
|
||||
|
||||
// advance commit index to snapshot first
|
||||
SSnapshot snapshot;
|
||||
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
|
||||
if (snapshot.lastApplyIndex > 0 && snapshot.lastApplyIndex > pSyncNode->commitIndex) {
|
||||
SyncIndex commitBegin = pSyncNode->commitIndex;
|
||||
SyncIndex commitEnd = snapshot.lastApplyIndex;
|
||||
pSyncNode->commitIndex = snapshot.lastApplyIndex;
|
||||
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin,
|
||||
commitEnd);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
}
|
||||
|
||||
// advance commit index as large as possible
|
||||
SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
|
||||
if (lastIndex > pSyncNode->commitIndex) {
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "commit by wal from index:%" PRId64 " to index:%" PRId64,
|
||||
pSyncNode->commitIndex + 1, lastIndex);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
pSyncNode->commitIndex = lastIndex;
|
||||
}
|
||||
|
||||
// call back Wal
|
||||
SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore);
|
||||
if (pSyncNode->commitIndex > walCommitVer) {
|
||||
pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex);
|
||||
}
|
||||
}
|
||||
|
||||
void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode == NULL) {
|
||||
sError("pSyncNode is NULL");
|
||||
|
|
|
@ -768,7 +768,7 @@ char* sync2SimpleStr(int64_t rid) {
|
|||
sTrace("syncSetRpc get pSyncNode is NULL, rid:%" PRId64, rid);
|
||||
return NULL;
|
||||
}
|
||||
ASSERT(rid == pSyncNode->rid);
|
||||
|
||||
char* s = syncNode2SimpleStr(pSyncNode);
|
||||
syncNodeRelease(pSyncNode);
|
||||
|
||||
|
@ -778,11 +778,9 @@ char* sync2SimpleStr(int64_t rid) {
|
|||
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
|
||||
SSyncNode* pSyncNode = syncNodeAcquire(rid);
|
||||
if (pSyncNode == NULL) {
|
||||
syncNodeRelease(pSyncNode);
|
||||
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
|
||||
return -1;
|
||||
}
|
||||
ASSERT(rid == pSyncNode->rid);
|
||||
|
||||
int32_t ret = syncNodePropose(pSyncNode, pMsg, isWeak);
|
||||
syncNodeRelease(pSyncNode);
|
||||
|
@ -3010,7 +3008,11 @@ int32_t syncNodeOnClientRequest(SSyncNode* ths, SyncClientRequest* pMsg, SyncInd
|
|||
|
||||
// if only myself, maybe commit right now
|
||||
if (ths->replicaNum == 1) {
|
||||
if (syncNodeIsMnode(ths)) {
|
||||
syncMaybeAdvanceCommitIndex(ths);
|
||||
} else {
|
||||
syncOneReplicaAdvance(ths);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3104,15 +3106,15 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p
|
|||
|
||||
if (ths->pFsm->FpLeaderTransferCb != NULL) {
|
||||
SFsmCbMeta cbMeta = {
|
||||
cbMeta.code = 0,
|
||||
cbMeta.currentTerm = ths->pRaftStore->currentTerm,
|
||||
cbMeta.flag = 0,
|
||||
cbMeta.index = pEntry->index,
|
||||
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index),
|
||||
cbMeta.isWeak = pEntry->isWeak,
|
||||
cbMeta.seqNum = pEntry->seqNum,
|
||||
cbMeta.state = ths->state,
|
||||
cbMeta.term = pEntry->term,
|
||||
.code = 0,
|
||||
.currentTerm = ths->pRaftStore->currentTerm,
|
||||
.flag = 0,
|
||||
.index = pEntry->index,
|
||||
.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index),
|
||||
.isWeak = pEntry->isWeak,
|
||||
.seqNum = pEntry->seqNum,
|
||||
.state = ths->state,
|
||||
.term = pEntry->term,
|
||||
};
|
||||
ths->pFsm->FpLeaderTransferCb(ths->pFsm, pRpcMsg, &cbMeta);
|
||||
}
|
||||
|
|
|
@ -72,6 +72,7 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
|
|||
SMsgHead head;
|
||||
head.vgId = vgId;
|
||||
head.contLen = sizeof(SMsgHead);
|
||||
head.msgMask = 0;
|
||||
SRpcMsg rpcMsg;
|
||||
memset(&rpcMsg, 0, sizeof(SRpcMsg));
|
||||
rpcMsg.contLen = head.contLen;
|
||||
|
|
|
@ -219,7 +219,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr
|
|||
pEntry->index, err, err, errStr, sysErr, sysErrStr);
|
||||
syncNodeErrorLog(pData->pSyncNode, logBuf);
|
||||
|
||||
ASSERT(0);
|
||||
// ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
pEntry->index = index;
|
||||
|
@ -327,7 +327,7 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn
|
|||
sError("vgId:%d, wal truncate error, from-index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
|
||||
pData->pSyncNode->vgId, fromIndex, err, err, errStr, sysErr, sysErrStr);
|
||||
|
||||
ASSERT(0);
|
||||
// ASSERT(0);
|
||||
}
|
||||
|
||||
// event log
|
||||
|
@ -376,6 +376,7 @@ int32_t raftLogUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index) {
|
|||
pData->pSyncNode->vgId, index, err, err, errStr, sysErr, sysErrStr);
|
||||
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -47,6 +47,19 @@ static bool syncNodeOnRequestVoteLogOK(SSyncNode* pSyncNode, SyncRequestVote* pM
|
|||
SyncTerm myLastTerm = syncNodeGetLastTerm(pSyncNode);
|
||||
SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
|
||||
|
||||
if (pMsg->lastLogIndex < pSyncNode->commitIndex) {
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf),
|
||||
"logok:0, {my-lterm:%" PRIu64 ", my-lindex:%" PRId64 ", recv-lterm:%" PRIu64 ", recv-lindex:%" PRId64
|
||||
", recv-term:%" PRIu64 "}",
|
||||
myLastTerm, myLastIndex, pMsg->lastLogTerm, pMsg->lastLogIndex, pMsg->term);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if (myLastTerm == SYNC_TERM_INVALID) {
|
||||
do {
|
||||
char logBuf[128];
|
||||
|
|
|
@ -146,15 +146,15 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl, bool rsp) {
|
|||
cnt++;
|
||||
|
||||
SFsmCbMeta cbMeta = {
|
||||
cbMeta.index = SYNC_INDEX_INVALID,
|
||||
cbMeta.lastConfigIndex = SYNC_INDEX_INVALID,
|
||||
cbMeta.isWeak = false,
|
||||
cbMeta.code = TSDB_CODE_SYN_TIMEOUT,
|
||||
cbMeta.state = pSyncNode->state,
|
||||
cbMeta.seqNum = *pSeqNum,
|
||||
cbMeta.term = SYNC_TERM_INVALID,
|
||||
cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm,
|
||||
cbMeta.flag = 0,
|
||||
.index = SYNC_INDEX_INVALID,
|
||||
.lastConfigIndex = SYNC_INDEX_INVALID,
|
||||
.isWeak = false,
|
||||
.code = TSDB_CODE_SYN_TIMEOUT,
|
||||
.state = pSyncNode->state,
|
||||
.seqNum = *pSeqNum,
|
||||
.term = SYNC_TERM_INVALID,
|
||||
.currentTerm = pSyncNode->pRaftStore->currentTerm,
|
||||
.flag = 0,
|
||||
};
|
||||
|
||||
pStub->rpcMsg.pCont = NULL;
|
||||
|
|
|
@ -427,7 +427,7 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) {
|
|||
|
||||
if (pCtx == NULL || pCtx->pSem == NULL) {
|
||||
if (transMsg.info.ahandle == NULL) {
|
||||
if (REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
|
||||
if (pMsg == NULL || REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
|
||||
once = true;
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -722,13 +722,13 @@ int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *envFile) {
|
|||
const char *filepath = ".env";
|
||||
if (envFile != NULL && strlen(envFile) > 0) {
|
||||
if (!taosCheckExistFile(envFile)) {
|
||||
uError("failed to load env file: %s", envFile);
|
||||
uError("failed to load env file:%s", envFile);
|
||||
return -1;
|
||||
}
|
||||
filepath = envFile;
|
||||
} else {
|
||||
if (!taosCheckExistFile(filepath)) {
|
||||
uInfo("failed to load env file: %s", filepath);
|
||||
uInfo("env file:%s not load", filepath);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -929,7 +929,7 @@ int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
|
|||
if (strncmp(url, "jsonFile", 8) == 0) {
|
||||
char *filepath = p;
|
||||
if (!taosCheckExistFile(filepath)) {
|
||||
uError("failed to load json file: %s", filepath);
|
||||
uError("failed to load json file:%s", filepath);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1056,13 +1056,13 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char *apolloUrl
|
|||
const char *filepath = ".env";
|
||||
if (envFile != NULL && strlen(envFile) > 0) {
|
||||
if (!taosCheckExistFile(envFile)) {
|
||||
uError("failed to load env file: %s", envFile);
|
||||
uError("failed to load env file:%s", envFile);
|
||||
return -1;
|
||||
}
|
||||
filepath = envFile;
|
||||
} else {
|
||||
if (!taosCheckExistFile(filepath)) {
|
||||
uInfo("failed to load env file: %s", filepath);
|
||||
uInfo("env file:%s not load", filepath);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -81,25 +81,25 @@ int64_t tsNumOfDebugLogs = 0;
|
|||
int64_t tsNumOfTraceLogs = 0;
|
||||
|
||||
// log
|
||||
int32_t dDebugFlag = 135;
|
||||
int32_t vDebugFlag = 135;
|
||||
int32_t mDebugFlag = 135;
|
||||
int32_t dDebugFlag = 131;
|
||||
int32_t vDebugFlag = 131;
|
||||
int32_t mDebugFlag = 131;
|
||||
int32_t cDebugFlag = 131;
|
||||
int32_t jniDebugFlag = 131;
|
||||
int32_t tmrDebugFlag = 131;
|
||||
int32_t uDebugFlag = 131;
|
||||
int32_t rpcDebugFlag = 131;
|
||||
int32_t qDebugFlag = 131;
|
||||
int32_t wDebugFlag = 135;
|
||||
int32_t sDebugFlag = 135;
|
||||
int32_t wDebugFlag = 131;
|
||||
int32_t sDebugFlag = 131;
|
||||
int32_t tsdbDebugFlag = 131;
|
||||
int32_t tdbDebugFlag = 131;
|
||||
int32_t tqDebugFlag = 135;
|
||||
int32_t fsDebugFlag = 135;
|
||||
int32_t metaDebugFlag = 135;
|
||||
int32_t udfDebugFlag = 135;
|
||||
int32_t tqDebugFlag = 131;
|
||||
int32_t fsDebugFlag = 131;
|
||||
int32_t metaDebugFlag = 131;
|
||||
int32_t udfDebugFlag = 131;
|
||||
int32_t smaDebugFlag = 131;
|
||||
int32_t idxDebugFlag = 135;
|
||||
int32_t idxDebugFlag = 131;
|
||||
|
||||
int64_t dbgEmptyW = 0;
|
||||
int64_t dbgWN = 0;
|
||||
|
|
|
@ -21,46 +21,6 @@
|
|||
int64_t tsRpcQueueMemoryAllowed = 0;
|
||||
int64_t tsRpcQueueMemoryUsed = 0;
|
||||
|
||||
typedef struct STaosQnode STaosQnode;
|
||||
|
||||
typedef struct STaosQnode {
|
||||
STaosQnode *next;
|
||||
STaosQueue *queue;
|
||||
int64_t timestamp;
|
||||
int32_t size;
|
||||
int8_t itype;
|
||||
int8_t reserved[3];
|
||||
char item[];
|
||||
} STaosQnode;
|
||||
|
||||
typedef struct STaosQueue {
|
||||
STaosQnode *head;
|
||||
STaosQnode *tail;
|
||||
STaosQueue *next; // for queue set
|
||||
STaosQset *qset; // for queue set
|
||||
void *ahandle; // for queue set
|
||||
FItem itemFp;
|
||||
FItems itemsFp;
|
||||
TdThreadMutex mutex;
|
||||
int64_t memOfItems;
|
||||
int32_t numOfItems;
|
||||
} STaosQueue;
|
||||
|
||||
typedef struct STaosQset {
|
||||
STaosQueue *head;
|
||||
STaosQueue *current;
|
||||
TdThreadMutex mutex;
|
||||
tsem_t sem;
|
||||
int32_t numOfQueues;
|
||||
int32_t numOfItems;
|
||||
} STaosQset;
|
||||
|
||||
typedef struct STaosQall {
|
||||
STaosQnode *current;
|
||||
STaosQnode *start;
|
||||
int32_t numOfItems;
|
||||
} STaosQall;
|
||||
|
||||
STaosQueue *taosOpenQueue() {
|
||||
STaosQueue *queue = taosMemoryCalloc(1, sizeof(STaosQueue));
|
||||
if (queue == NULL) {
|
||||
|
|
|
@ -361,7 +361,7 @@ int32_t taosListRef() {
|
|||
|
||||
if (pSet->state == TSDB_REF_STATE_EMPTY) continue;
|
||||
|
||||
uInfo("rsetId:%d state:%d count::%d", i, pSet->state, pSet->count);
|
||||
uInfo("rsetId:%d state:%d count:%d", i, pSet->state, pSet->count);
|
||||
|
||||
for (int32_t j = 0; j < pSet->max; ++j) {
|
||||
pNode = pSet->nodeList[j];
|
||||
|
|
|
@ -36,14 +36,13 @@ int32_t tQWorkerInit(SQWorkerPool *pool) {
|
|||
worker->pool = pool;
|
||||
}
|
||||
|
||||
uInfo("worker:%s is initialized, min:%d max:%d", pool->name, pool->min, pool->max);
|
||||
uDebug("worker:%s is initialized, min:%d max:%d", pool->name, pool->min, pool->max);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tQWorkerCleanup(SQWorkerPool *pool) {
|
||||
for (int32_t i = 0; i < pool->max; ++i) {
|
||||
SQWorker *worker = pool->workers + i;
|
||||
// if (worker == NULL) continue;
|
||||
if (taosCheckPthreadValid(worker->thread)) {
|
||||
taosQsetThreadResume(pool->qset);
|
||||
}
|
||||
|
@ -51,7 +50,6 @@ void tQWorkerCleanup(SQWorkerPool *pool) {
|
|||
|
||||
for (int32_t i = 0; i < pool->max; ++i) {
|
||||
SQWorker *worker = pool->workers + i;
|
||||
// if (worker == NULL) continue;
|
||||
if (taosCheckPthreadValid(worker->thread)) {
|
||||
taosThreadJoin(worker->thread, NULL);
|
||||
taosThreadClear(&worker->thread);
|
||||
|
@ -73,11 +71,13 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
|
|||
|
||||
taosBlockSIGPIPE();
|
||||
setThreadName(pool->name);
|
||||
uDebug("worker:%s:%d is running", pool->name, worker->id);
|
||||
worker->pid = taosGetSelfPthreadId();
|
||||
uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid);
|
||||
|
||||
while (1) {
|
||||
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &qinfo) == 0) {
|
||||
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
|
||||
uInfo("worker:%s:%d qset:%p, got no message and exiting, thread:%08" PRId64, pool->name, worker->id, pool->qset,
|
||||
worker->pid);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -124,7 +124,7 @@ STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp) {
|
|||
}
|
||||
|
||||
taosThreadMutexUnlock(&pool->mutex);
|
||||
uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
|
||||
uInfo("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
|
||||
|
||||
return queue;
|
||||
}
|
||||
|
@ -191,12 +191,14 @@ static void *tWWorkerThreadFp(SWWorker *worker) {
|
|||
|
||||
taosBlockSIGPIPE();
|
||||
setThreadName(pool->name);
|
||||
uDebug("worker:%s:%d is running", pool->name, worker->id);
|
||||
worker->pid = taosGetSelfPthreadId();
|
||||
uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid);
|
||||
|
||||
while (1) {
|
||||
numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &qinfo);
|
||||
if (numOfMsgs == 0) {
|
||||
uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset);
|
||||
uInfo("worker:%s:%d qset:%p, got no message and exiting, thread:%08" PRId64, pool->name, worker->id, worker->qset,
|
||||
worker->pid);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -244,7 +246,6 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) {
|
|||
pool->nextId = (pool->nextId + 1) % pool->max;
|
||||
}
|
||||
|
||||
uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
|
@ -256,6 +257,9 @@ _OVER:
|
|||
if (worker->qall != NULL) taosFreeQall(worker->qall);
|
||||
return NULL;
|
||||
} else {
|
||||
while (worker->pid <= 0) taosMsleep(10);
|
||||
queue->threadId = worker->pid;
|
||||
uInfo("worker:%s, queue:%p is allocated, ahandle:%p thread:%08" PRId64, pool->name, queue, ahandle, queue->threadId);
|
||||
return queue;
|
||||
}
|
||||
}
|
||||
|
|
|
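The allocation path above now spin-waits until the worker thread has published its pid before stamping queue->threadId into the log line. The same idea in Python, assuming a worker object with a pid attribute (a sketch, not the framework API):

    import time

    def wait_for_worker_pid(worker, poll_ms=10):
        # Mirrors `while (worker->pid <= 0) taosMsleep(10);` from the hunk above:
        # poll until the worker thread has stored its id, then return it.
        while getattr(worker, "pid", 0) <= 0:
            time.sleep(poll_ms / 1000.0)
        return worker.pid

A threading.Event set by the worker right after it records its pid would avoid the polling, at the cost of one extra field.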
@ -0,0 +1,74 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"precision": "ms"
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 10,
|
||||
"childtable_prefix": "stb00_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 100,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset": 0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval": 0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 10,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 200,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset": 0,
|
||||
"interlace_rows": 0 ,
|
||||
"insert_interval": 0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
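The insert configuration above creates two super tables with 10 child tables each (100 and 200 rows per child table) in database db. Files like this are fed to taosBenchmark with -f, exactly as the test case later in this commit does; a small hedged wrapper (paths are examples):

    import json
    import subprocess

    def run_benchmark(binary, cfg_path):
        # Sanity-check the JSON before handing it to `taosBenchmark -f <file>`.
        with open(cfg_path) as f:
            cfg = json.load(f)
        assert cfg["filetype"] in ("insert", "query")
        return subprocess.run([binary, "-f", cfg_path], check=True)

    # run_benchmark("taosBenchmark", "./json/queryInsertdata.json")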
@ -0,0 +1,73 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 4,
|
||||
"thread_count_create_tbl": 4,
|
||||
"result_file": "./insert_res.txt",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"interlace_rows": 0,
|
||||
"num_of_records_per_req": 3000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [{
|
||||
"dbinfo": {
|
||||
"name": "db",
|
||||
"drop": "yes",
|
||||
"precision": "ms"
|
||||
},
|
||||
"super_tables": [{
|
||||
"name": "stb0",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 2,
|
||||
"childtable_prefix": "stb00_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 10,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset": 0,
|
||||
"interlace_rows": 0,
|
||||
"insert_interval": 0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
|
||||
},
|
||||
{
|
||||
"name": "stb1",
|
||||
"child_table_exists":"no",
|
||||
"childtable_count": 2,
|
||||
"childtable_prefix": "stb01_",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 10,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"insert_rows": 5,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset": 0,
|
||||
"interlace_rows": 0 ,
|
||||
"insert_interval": 0,
|
||||
"max_sql_len": 1024000,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1,
|
||||
"start_timestamp": "2020-11-01 00:00:00.000",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}],
|
||||
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
|
||||
}]
|
||||
}]
|
||||
}
|
|
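This second insert configuration is the smaller REST-side dataset: 2 child tables with 10 rows each for stb0 and 2 with 5 rows each for stb1. Those numbers drive the assertions later in the test (for example count(*) from stb00_1 returning 10), so a quick expected-rows check like the sketch below can be handy (helper name is illustrative):

    def expected_rows(super_tables):
        # Total rows an insert config should produce: childtable_count * insert_rows per stable.
        return {s["name"]: s["childtable_count"] * s["insert_rows"] for s in super_tables}

    # For the config above: stb0 -> 2 * 10 = 20 rows, stb1 -> 2 * 5 = 10 rows.
    print(expected_rows([
        {"name": "stb0", "childtable_count": 2, "insert_rows": 10},
        {"name": "stb1", "childtable_count": 2, "insert_rows": 5},
    ]))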
@ -0,0 +1,35 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "db",
|
||||
"query_times": 1,
|
||||
"specified_table_query": {
|
||||
"query_interval": 10,
|
||||
"threads": 4,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_0",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_9 ",
|
||||
"result": "./query_res1.txt"
|
||||
}]
|
||||
},
|
||||
"super_table_query": {
|
||||
"stblname": "stb1",
|
||||
"query_interval":20,
|
||||
"threads": 4,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(ts) from xxxx",
|
||||
"result": "./query_res2.txt"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
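Each sqls entry above writes its rows into the named result file; the test case later concatenates query_res0.txt* and counts lines to verify how many times the query ran. A pure-Python equivalent of that cat plus wc -l step (the glob pattern is an assumption matching the test's usage):

    import glob

    def count_result_lines(pattern="./query_res0.txt*"):
        # Equivalent of `cat query_res0.txt* | wc -l`: one line per returned row.
        total = 0
        for path in glob.glob(pattern):
            with open(path) as f:
                total += sum(1 for _ in f)
        return total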
@ -0,0 +1,38 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "db",
|
||||
"query_times": 2,
|
||||
"query_mode": "rest",
|
||||
"specified_table_query": {
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from db.stb0 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from db.stb00_1",
|
||||
"result": "./query_res1.txt"
|
||||
}
|
||||
]
|
||||
},
|
||||
"super_table_query": {
|
||||
"stblname": "stb1",
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(ts) from xxxx",
|
||||
"result": "./query_res2.txt"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
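With "query_mode": "rest" the statements above go through taosAdapter's HTTP interface on port 6041 instead of the native client, which is why the test later counts "200 OK" lines in the result files. A hedged sketch of issuing one of these statements over REST; the /rest/sql endpoint and basic-auth credentials are the stock defaults and are assumed here:

    import requests

    def rest_query(sql, host="127.0.0.1", port=6041, user="root", password="taosdata"):
        # Assumed endpoint: taosAdapter's /rest/sql with HTTP basic auth.
        url = "http://%s:%d/rest/sql" % (host, port)
        resp = requests.post(url, data=sql, auth=(user, password), timeout=10)
        resp.raise_for_status()  # the test effectively greps for "200 OK"
        return resp.json()       # the "data" array is what getfileDataRestful parses

    # rest_query("select last_row(*) from db.stb0")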
@ -0,0 +1,429 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "db",
|
||||
"query_times": 2,
|
||||
"specified_table_query": {
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_0",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_1",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_2",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_3",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_4",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_5",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_6",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_7",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_8",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_9",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_10 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_11 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_12 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_13 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_14 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_15 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_16 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_17 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_18 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_19 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_20 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_21 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_22 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_23 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_24 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_25 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_26 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_27 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_28 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_29 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_30 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_31 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_32 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_33 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_34 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_35 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_36 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_37 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_38 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_39 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_40 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_41 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_42 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_43 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_44 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_45 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_46 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_47 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_48 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_49 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_50 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_51 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_52 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_53 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_54 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_55 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_56 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_57 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_58 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_59 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_60",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_61",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_62",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_63",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_64",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_65",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_66",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_67",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_68",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_69",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_70 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_71 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_72 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_73 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_74 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_75 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_76 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_77 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_78 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_79 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_80 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_81 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_82 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_83 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_84 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_85 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_86 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_87 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_88 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_89 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_90 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_91 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_92 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_93 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_94 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_95 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_96 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_97 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_98 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from stb00_99 ",
|
||||
"result": "./query_res0.txt"
|
||||
|
||||
}]
|
||||
},
|
||||
"super_table_query": {
|
||||
"stblname": "stb1",
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(ts) from xxxx",
|
||||
"result": "./query_res2.txt"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,419 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "db",
|
||||
"query_times": 3,
|
||||
"super_table_query": {
|
||||
"stblname": "stb0",
|
||||
"query_interval": 10,
|
||||
"threads": 9,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select last_row(*) from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select * from xxxx ",
|
||||
"result": "./query_res0.txt"
|
||||
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
|
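The file above spells out roughly a hundred near-identical last_row queries against stb00_0 through stb00_99 by hand; it exists to exercise the multi-SQL limit that the test case checks later. When a list like this is needed, generating it is less error-prone than typing it (a sketch; field values mirror the file above):

    import json

    def build_multi_sql_config(n, result_file="./query_res0.txt"):
        # Generate n near-identical specified_table_query entries like the ones above.
        sqls = [{"sql": "select last_row(*) from stb00_%d" % i, "result": result_file}
                for i in range(n)]
        return {"specified_table_query": {"query_interval": 1, "threads": 3, "sqls": sqls}}

    print(json.dumps(build_multi_sql_config(3), indent=2))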
@ -0,0 +1,37 @@
|
|||
{
|
||||
"filetype": "query",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "127.0.0.1",
|
||||
"port": 6030,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"databases": "db",
|
||||
"query_times": 2,
|
||||
"query_mode": "taosc",
|
||||
"specified_table_query": {
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(*) from stb0 ",
|
||||
"result": "./query_res0.txt"
|
||||
},
|
||||
{
|
||||
"sql": "select count(*) from stb00_1",
|
||||
"result": "./query_res1.txt"
|
||||
}
|
||||
]
|
||||
},
|
||||
"super_table_query": {
|
||||
"stblname": "stb1",
|
||||
"query_interval": 1,
|
||||
"threads": 3,
|
||||
"sqls": [
|
||||
{
|
||||
"sql": "select last_row(ts) from xxxx",
|
||||
"result": "./query_res2.txt"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
|
@ -0,0 +1,241 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
import time
|
||||
from datetime import datetime
|
||||
import ast
|
||||
import re
|
||||
|
||||
# from assertpy import assert_that
|
||||
import subprocess
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getPath(self, tool="taosBenchmark"):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if "community" in selfPath:
|
||||
projPath = selfPath[: selfPath.find("community")]
|
||||
elif "src" in selfPath:
|
||||
projPath = selfPath[: selfPath.find("src")]
|
||||
elif "/tools/" in selfPath:
|
||||
projPath = selfPath[: selfPath.find("/tools/")]
|
||||
elif "/tests/" in selfPath:
|
||||
projPath = selfPath[: selfPath.find("/tests/")]
|
||||
else:
|
||||
tdLog.info("cannot found %s in path: %s, use system's" % (tool, selfPath))
|
||||
projPath = "/usr/local/taos/bin/"
|
||||
|
||||
paths = []
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if (tool) in files:
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if "packaging" not in rootRealPath:
|
||||
paths.append(os.path.join(root, tool))
|
||||
break
|
||||
if len(paths) == 0:
|
||||
return ""
|
||||
return paths[0]
|
||||
|
||||
# Read the taosc query result file, iterate over each line, and assert the first column of every row.
|
||||
def assertfileDataTaosc(self, filename, expectResult):
|
||||
self.filename = filename
|
||||
self.expectResult = expectResult
|
||||
with open("%s" % filename, "r+") as f1:
|
||||
for line in f1.readlines():
|
||||
queryResultTaosc = line.strip().split()[0]
|
||||
self.assertCheck(filename, queryResultTaosc, expectResult)
|
||||
|
||||
# Extract the key content from the RESTful query result file. Currently the loop breaks at the first matching key, so only one value is returned; handling multiple result files can be added later.
|
||||
def getfileDataRestful(self, filename):
|
||||
self.filename = filename
|
||||
with open("%s" % filename, "r+") as f1:
|
||||
for line in f1.readlines():
|
||||
contents = line.strip()
|
||||
if contents.find("data") != -1:
|
||||
pattern = re.compile("{.*}")
|
||||
contents = pattern.search(contents).group()
|
||||
contentsDict = ast.literal_eval(contents)  # convert the string to a dict
|
||||
queryResultRest = contentsDict["data"][0][0]
|
||||
break
|
||||
else:
|
||||
queryResultRest = ""
|
||||
return queryResultRest
|
||||
|
||||
# Get the number of taosc queries by counting lines in the result file.
|
||||
def queryTimesTaosc(self, filename):
|
||||
self.filename = filename
|
||||
command = "cat %s |wc -l" % filename
|
||||
times = int(subprocess.getstatusoutput(command)[1])
|
||||
return times
|
||||
|
||||
# Get the number of RESTful queries by counting "200 OK" responses in the result file.
|
||||
def queryTimesRestful(self, filename):
|
||||
self.filename = filename
|
||||
command = 'cat %s |grep "200 OK" |wc -l' % filename
|
||||
times = int(subprocess.getstatusoutput(command)[1])
|
||||
return times
|
||||
|
||||
# Assert that the query result matches the expected value; report the mismatch details on failure, otherwise pass.
|
||||
def assertCheck(self, filename, queryResult, expectResult):
|
||||
self.filename = filename
|
||||
self.queryResult = queryResult
|
||||
self.expectResult = expectResult
|
||||
args0 = (filename, queryResult, expectResult)
|
||||
assert queryResult == expectResult, (
|
||||
"Queryfile:%s ,result is %s != expect: %s" % args0
|
||||
)
|
||||
|
||||
def run(self):
|
||||
binPath = self.getPath()
|
||||
if binPath == "":
|
||||
tdLog.exit("taosBenchmark not found!")
|
||||
else:
|
||||
tdLog.info("taosBenchmark use %s" % binPath)
|
||||
|
||||
# delete useless files
|
||||
os.system("rm -rf ./query_res*")
|
||||
os.system("rm -rf ./all_query*")
|
||||
|
||||
# taosc query: query specified table and query super table
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath)
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryTaosc.json" % binPath)
|
||||
os.system("cat query_res0.txt* > all_query_res0_taosc.txt")
|
||||
os.system("cat query_res1.txt* > all_query_res1_taosc.txt")
|
||||
os.system("cat query_res2.txt* > all_query_res2_taosc.txt")
|
||||
|
||||
# correct Times testcases
|
||||
queryTimes0Taosc = self.queryTimesTaosc("all_query_res0_taosc.txt")
|
||||
self.assertCheck("all_query_res0_taosc.txt", queryTimes0Taosc, 6)
|
||||
|
||||
queryTimes1Taosc = self.queryTimesTaosc("all_query_res1_taosc.txt")
|
||||
self.assertCheck("all_query_res1_taosc.txt", queryTimes1Taosc, 6)
|
||||
|
||||
queryTimes2Taosc = self.queryTimesTaosc("all_query_res2_taosc.txt")
|
||||
self.assertCheck("all_query_res2_taosc.txt", queryTimes2Taosc, 20)
|
||||
|
||||
# correct data testcase
|
||||
self.assertfileDataTaosc("all_query_res0_taosc.txt", "1604160000099")
|
||||
self.assertfileDataTaosc("all_query_res1_taosc.txt", "100")
|
||||
self.assertfileDataTaosc("all_query_res2_taosc.txt", "1604160000199")
|
||||
|
||||
# delete useless files
|
||||
os.system("rm -rf ./query_res*")
|
||||
os.system("rm -rf ./all_query*")
|
||||
|
||||
# use restful api to query
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertrestdata.json" % binPath)
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryRestful.json" % binPath)
|
||||
os.system("cat query_res0.txt* > all_query_res0_rest.txt")
|
||||
os.system("cat query_res1.txt* > all_query_res1_rest.txt")
|
||||
os.system("cat query_res2.txt* > all_query_res2_rest.txt")
|
||||
|
||||
# correct Times testcases
|
||||
queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt")
|
||||
self.assertCheck("all_query_res0_rest.txt", queryTimes0Restful, 6)
|
||||
|
||||
queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt")
|
||||
self.assertCheck("all_query_res1_rest.txt", queryTimes1Restful, 6)
|
||||
|
||||
queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt")
|
||||
self.assertCheck("all_query_res2_rest.txt", queryTimes2Restful, 4)
|
||||
|
||||
# correct data testcase
|
||||
data0 = self.getfileDataRestful("all_query_res0_rest.txt")
|
||||
if data0 != "2020-11-01 00:00:00.009" and data0 != "2020-10-31T16:00:00.009Z":
|
||||
tdLog.exit(
|
||||
"data0 is not 2020-11-01 00:00:00.009 and 2020-10-31T16:00:00.009Z"
|
||||
)
|
||||
|
||||
data1 = self.getfileDataRestful("all_query_res1_rest.txt")
|
||||
self.assertCheck("all_query_res1_rest.txt", data1, 10)
|
||||
|
||||
data2 = self.getfileDataRestful("all_query_res2_rest.txt")
|
||||
if data2 != "2020-11-01 00:00:00.004" and data2 != "2020-10-31T16:00:00.004Z":
|
||||
tdLog.exit(
|
||||
"data2 is not 2020-11-01 00:00:00.004 and 2020-10-31T16:00:00.004Z"
|
||||
)
|
||||
|
||||
# query times less than or equal to 100
|
||||
assert (
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath) == 0
|
||||
)
|
||||
assert (
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/querySpeciMutisql100.json" % binPath)
|
||||
!= 0
|
||||
)
|
||||
assert (
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/querySuperMutisql100.json" % binPath)
|
||||
== 0
|
||||
)
|
||||
|
||||
# query result print QPS
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath)
|
||||
exceptcode = os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryQps.json" % binPath)
|
||||
assert exceptcode == 0
|
||||
|
||||
# 2021.02.09 need to modify taosBenchmark code
|
||||
# use illegal or out of range parameters query json file
|
||||
os.system("%s -f ./5-taos-tools/taosbenchmark/json/queryInsertdata.json" % binPath)
|
||||
# 2021.02.09 need to modify taosBenchmark code
|
||||
# exceptcode = os.system(
|
||||
# "%s -f ./taosbenchmark/json/queryTimes0.json" %
|
||||
# binPath)
|
||||
# assert exceptcode != 0
|
||||
|
||||
# 2021.02.09 need to modify taosBenchmark code
|
||||
# exceptcode0 = os.system(
|
||||
# "%s -f ./taosbenchmark/json/queryTimesless0.json" %
|
||||
# binPath)
|
||||
# assert exceptcode0 != 0
|
||||
|
||||
# exceptcode1 = os.system(
|
||||
# "%s -f ./taosbenchmark/json/queryConcurrent0.json" %
|
||||
# binPath)
|
||||
# assert exceptcode2 != 0
|
||||
|
||||
# exceptcode3 = os.system(
|
||||
# "%s -f ./taosbenchmark/json/querrThreadsless0.json" %
|
||||
# binPath)
|
||||
# assert exceptcode3 != 0
|
||||
|
||||
# exceptcode4 = os.system(
|
||||
# "%s -f ./taosbenchmark/json/querrThreads0.json" %
|
||||
# binPath)
|
||||
# assert exceptcode4 != 0
|
||||
|
||||
# delete useless files
|
||||
os.system("rm -rf ./insert_res.txt")
|
||||
os.system("rm -rf 5-taos-tools/taosbenchmark/*.py.sql")
|
||||
os.system("rm -rf ./querySystemInfo*")
|
||||
os.system("rm -rf ./query_res*")
|
||||
os.system("rm -rf ./all_query*")
|
||||
os.system("rm -rf ./test_query_res0.txt")
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
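The run() method above asserts fixed line counts for the aggregated result files (6, 6 and 20 for the taosc pass; 6, 6 and 4 for the REST pass). Those numbers are consistent with query_times times threads for specified-table queries and query_times times child-table count for super-table queries, which is an inference from the configs in this commit rather than a documented guarantee:

    def expected_result_lines(query_times, threads=None, child_tables=None):
        # Inferred bookkeeping behind the assertions above.
        return query_times * (threads if child_tables is None else child_tables)

    assert expected_result_lines(2, threads=3) == 6         # all_query_res0 / res1
    assert expected_result_lines(2, child_tables=10) == 20  # all_query_res2, taosc dataset
    assert expected_result_lines(2, child_tables=2) == 4    # all_query_res2, REST dataset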
@ -18,3 +18,4 @@ python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py
|
|||
#python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_telnet_alltypes.py
|
||||
#python3 ./test.py -f 5-taos-tools/taosbenchmark/taosadapter_json.py
|
||||
#python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py
|
||||
python3 ./test.py -f 5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py -R
|
||||
|
|
|
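The added CI entry runs taosdemoTestQueryWithJson.py through the RESTful path via -R. Driving the same case from another harness would look roughly like this (a sketch built from the flags used in this commit):

    import subprocess

    CASE = "5-taos-tools/taosbenchmark/taosdemoTestQueryWithJson.py"
    subprocess.run(["python3", "./test.py", "-f", CASE, "-R"], check=True)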
@ -24,6 +24,7 @@ import socket
|
|||
import threading
|
||||
|
||||
import toml
|
||||
|
||||
sys.path.append("../pytest")
|
||||
from util.log import *
|
||||
from util.dnodes import *
|
||||
|
@ -34,14 +35,16 @@ from util.taosadapter import *
|
|||
import taos
|
||||
import taosrest
|
||||
|
||||
|
||||
def checkRunTimeError():
|
||||
import win32gui
|
||||
|
||||
timeCount = 0
|
||||
while 1:
|
||||
time.sleep(1)
|
||||
timeCount = timeCount + 1
|
||||
print("checkRunTimeError",timeCount)
|
||||
if (timeCount>600):
|
||||
print("checkRunTimeError", timeCount)
|
||||
if timeCount > 600:
|
||||
print("stop the test.")
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
os.system("TASKKILL /F /IM taos.exe")
|
||||
|
@ -53,6 +56,7 @@ def checkRunTimeError():
|
|||
if hwnd:
|
||||
os.system("TASKKILL /F /IM taosd.exe")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
fileName = "all"
|
||||
|
@ -73,102 +77,124 @@ if __name__ == "__main__":
|
|||
createDnodeNums = 1
|
||||
restful = False
|
||||
replicaVar = 1
|
||||
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:', [
|
||||
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar'])
|
||||
opts, args = getopt.gnu_getopt(
|
||||
sys.argv[1:],
|
||||
"f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:",
|
||||
[
|
||||
"file=",
|
||||
"path=",
|
||||
"master",
|
||||
"logSql",
|
||||
"stop",
|
||||
"cluster",
|
||||
"valgrind",
|
||||
"help",
|
||||
"restart",
|
||||
"updateCfgDict",
|
||||
"killv",
|
||||
"execCmd",
|
||||
"dnodeNums",
|
||||
"mnodeNums",
|
||||
"queryPolicy",
|
||||
"createDnodeNums",
|
||||
"restful",
|
||||
"adaptercfgupdate",
|
||||
"replicaVar",
|
||||
],
|
||||
)
|
||||
for key, value in opts:
|
||||
if key in ['-h', '--help']:
|
||||
tdLog.printNoPrefix(
|
||||
'A collection of test cases written using Python')
|
||||
tdLog.printNoPrefix('-f Name of test case file written by Python')
|
||||
tdLog.printNoPrefix('-p Deploy Path for Simulator')
|
||||
tdLog.printNoPrefix('-m Master Ip for Simulator')
|
||||
tdLog.printNoPrefix('-l <True:False> logSql Flag')
|
||||
tdLog.printNoPrefix('-s stop All dnodes')
|
||||
tdLog.printNoPrefix('-c Test Cluster Flag')
|
||||
tdLog.printNoPrefix('-g valgrind Test Flag')
|
||||
tdLog.printNoPrefix('-r taosd restart test')
|
||||
tdLog.printNoPrefix('-d update cfg dict, base64 json str')
|
||||
tdLog.printNoPrefix('-k not kill valgrind processer')
|
||||
tdLog.printNoPrefix('-e eval str to run')
|
||||
tdLog.printNoPrefix('-N start dnodes numbers in clusters')
|
||||
tdLog.printNoPrefix('-M create mnode numbers in clusters')
|
||||
tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
|
||||
tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')
|
||||
tdLog.printNoPrefix('-R restful realization form')
|
||||
tdLog.printNoPrefix('-D taosadapter update cfg dict ')
|
||||
tdLog.printNoPrefix('-n the number of replicas')
|
||||
if key in ["-h", "--help"]:
|
||||
tdLog.printNoPrefix("A collection of test cases written using Python")
|
||||
tdLog.printNoPrefix("-f Name of test case file written by Python")
|
||||
tdLog.printNoPrefix("-p Deploy Path for Simulator")
|
||||
tdLog.printNoPrefix("-m Master Ip for Simulator")
|
||||
tdLog.printNoPrefix("-l <True:False> logSql Flag")
|
||||
tdLog.printNoPrefix("-s stop All dnodes")
|
||||
tdLog.printNoPrefix("-c Test Cluster Flag")
|
||||
tdLog.printNoPrefix("-g valgrind Test Flag")
|
||||
tdLog.printNoPrefix("-r taosd restart test")
|
||||
tdLog.printNoPrefix("-d update cfg dict, base64 json str")
|
||||
tdLog.printNoPrefix("-k not kill valgrind processer")
|
||||
tdLog.printNoPrefix("-e eval str to run")
|
||||
tdLog.printNoPrefix("-N start dnodes numbers in clusters")
|
||||
tdLog.printNoPrefix("-M create mnode numbers in clusters")
|
||||
tdLog.printNoPrefix("-Q set queryPolicy in one dnode")
|
||||
tdLog.printNoPrefix("-C create Dnode Numbers in one cluster")
|
||||
tdLog.printNoPrefix("-R restful realization form")
|
||||
tdLog.printNoPrefix("-D taosadapter update cfg dict ")
|
||||
tdLog.printNoPrefix("-n the number of replicas")
|
||||
sys.exit(0)
|
||||
|
||||
if key in ['-r', '--restart']:
|
||||
if key in ["-r", "--restart"]:
|
||||
restart = True
|
||||
|
||||
if key in ['-f', '--file']:
|
||||
if key in ["-f", "--file"]:
|
||||
fileName = value
|
||||
|
||||
if key in ['-p', '--path']:
|
||||
if key in ["-p", "--path"]:
|
||||
deployPath = value
|
||||
|
||||
if key in ['-m', '--master']:
|
||||
if key in ["-m", "--master"]:
|
||||
masterIp = value
|
||||
|
||||
if key in ['-l', '--logSql']:
|
||||
if (value.upper() == "TRUE"):
|
||||
if key in ["-l", "--logSql"]:
|
||||
if value.upper() == "TRUE":
|
||||
logSql = True
|
||||
elif (value.upper() == "FALSE"):
|
||||
elif value.upper() == "FALSE":
|
||||
logSql = False
|
||||
else:
|
||||
tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
|
||||
sys.exit(0)
|
||||
|
||||
if key in ['-c', '--cluster']:
|
||||
if key in ["-c", "--cluster"]:
|
||||
testCluster = True
|
||||
|
||||
if key in ['-g', '--valgrind']:
|
||||
if key in ["-g", "--valgrind"]:
|
||||
valgrind = 1
|
||||
|
||||
if key in ['-s', '--stop']:
|
||||
if key in ["-s", "--stop"]:
|
||||
stop = 1
|
||||
|
||||
if key in ['-d', '--updateCfgDict']:
|
||||
if key in ["-d", "--updateCfgDict"]:
|
||||
try:
|
||||
updateCfgDict = eval(base64.b64decode(value.encode()).decode())
|
||||
except:
|
||||
print('updateCfgDict convert fail.')
|
||||
print("updateCfgDict convert fail.")
|
||||
sys.exit(0)
|
||||
|
||||
if key in ['-k', '--killValgrind']:
|
||||
if key in ["-k", "--killValgrind"]:
|
||||
killValgrind = 0
|
||||
|
||||
if key in ['-e', '--execCmd']:
|
||||
if key in ["-e", "--execCmd"]:
|
||||
try:
|
||||
execCmd = base64.b64decode(value.encode()).decode()
|
||||
except:
|
||||
print('execCmd run fail.')
|
||||
print("execCmd run fail.")
|
||||
sys.exit(0)
|
||||
|
||||
if key in ['-N', '--dnodeNums']:
|
||||
if key in ["-N", "--dnodeNums"]:
|
||||
dnodeNums = value
|
||||
|
||||
if key in ['-M', '--mnodeNums']:
|
||||
if key in ["-M", "--mnodeNums"]:
|
||||
mnodeNums = value
|
||||
|
||||
if key in ['-Q', '--queryPolicy']:
|
||||
if key in ["-Q", "--queryPolicy"]:
|
||||
queryPolicy = value
|
||||
|
||||
if key in ['-C', '--createDnodeNums']:
|
||||
if key in ["-C", "--createDnodeNums"]:
|
||||
createDnodeNums = value
|
||||
|
||||
if key in ['-R', '--restful']:
|
||||
if key in ["-R", "--restful"]:
|
||||
restful = True
|
||||
|
||||
if key in ['-D', '--adaptercfgupdate']:
|
||||
if key in ["-D", "--adaptercfgupdate"]:
|
||||
try:
|
||||
adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
|
||||
except:
|
||||
print('adapter cfg update convert fail.')
|
||||
print("adapter cfg update convert fail.")
|
||||
sys.exit(0)
|
||||
|
||||
if key in ['-n', '--replicaVar']:
|
||||
if key in ["-n", "--replicaVar"]:
|
||||
replicaVar = value
|
||||
|
||||
if not execCmd == "":
|
||||
|
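The -d and -D options carry a base64-encoded dict that the launcher decodes with base64.b64decode plus eval, as shown above. Building such an argument on the caller side mirrors the updateCfgDictStr construction later in this file (the sample key is only an example):

    import base64
    import json

    def encode_cfg_dict(cfg):
        # Inverse of the launcher's `eval(base64.b64decode(value.encode()).decode())`.
        return base64.b64encode(json.dumps(cfg).encode()).decode()

    # e.g.:  python3 ./test.py -f case.py -d <encoded>
    print(encode_cfg_dict({"debugFlag": 131}))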
@ -180,18 +206,21 @@ if __name__ == "__main__":
|
|||
exec(execCmd)
|
||||
quit()
|
||||
|
||||
if (stop != 0):
|
||||
if (valgrind == 0):
|
||||
if stop != 0:
|
||||
if valgrind == 0:
|
||||
toBeKilled = "taosd"
|
||||
else:
|
||||
toBeKilled = "valgrind.bin"
|
||||
|
||||
killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
|
||||
killCmd = (
|
||||
"ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
|
||||
% toBeKilled
|
||||
)
|
||||
|
||||
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
|
||||
while(processID):
|
||||
while processID:
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
|
@ -218,7 +247,7 @@ if __name__ == "__main__":
|
|||
# psCmd = f"pgrep {toBeKilled}"
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
|
||||
while(processID):
|
||||
while processID:
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(psCmd, shell=True)
|
||||
|
@ -233,9 +262,9 @@ if __name__ == "__main__":
|
|||
fuserCmd = f"fuser -k -n tcp {port}"
|
||||
os.system(fuserCmd)
|
||||
|
||||
tdLog.info('stop taosadapter')
|
||||
tdLog.info("stop taosadapter")
|
||||
|
||||
tdLog.info('stop All dnodes')
|
||||
tdLog.info("stop All dnodes")
|
||||
|
||||
if masterIp == "":
|
||||
host = socket.gethostname()
|
||||
|
@ -247,33 +276,40 @@ if __name__ == "__main__":
|
|||
host = masterIp
|
||||
|
||||
tdLog.info("Procedures for tdengine deployed in %s" % (host))
|
||||
if platform.system().lower() == 'windows':
|
||||
if platform.system().lower() == "windows":
|
||||
fileName = fileName.replace("/", os.sep)
|
||||
if (masterIp == "" and not fileName == "0-others\\udf_create.py"):
|
||||
threading.Thread(target=checkRunTimeError,daemon=True).start()
|
||||
if masterIp == "" and not fileName == "0-others\\udf_create.py":
|
||||
threading.Thread(target=checkRunTimeError, daemon=True).start()
|
||||
tdLog.info("Procedures for testing self-deployment")
|
||||
tdDnodes.init(deployPath, masterIp)
|
||||
tdDnodes.setTestCluster(testCluster)
|
||||
tdDnodes.setValgrind(valgrind)
|
||||
tdDnodes.stopAll()
|
||||
key_word = 'tdCases.addWindows'
|
||||
key_word = "tdCases.addWindows"
|
||||
is_test_framework = 0
|
||||
try:
|
||||
if key_word in open(fileName, encoding='UTF-8').read():
|
||||
if key_word in open(fileName, encoding="UTF-8").read():
|
||||
is_test_framework = 1
|
||||
except Exception as r:
|
||||
print(r)
|
||||
updateCfgDictStr = ''
|
||||
updateCfgDictStr = ""
|
||||
# adapter_cfg_dict_str = ''
|
||||
if is_test_framework:
|
||||
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
|
||||
uModule = importlib.import_module(moduleName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
|
||||
if (json.dumps(updateCfgDict) == "{}") and hasattr(
|
||||
ucase, "updatecfgDict"
|
||||
):
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
|
||||
if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')):
|
||||
updateCfgDictStr = (
|
||||
"-d %s"
|
||||
% base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
|
||||
)
|
||||
if (json.dumps(adapter_cfg_dict) == "{}") and hasattr(
|
||||
ucase, "taosadapter_cfg_dict"
|
||||
):
|
||||
adapter_cfg_dict = ucase.taosadapter_cfg_dict
|
||||
# adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}"
|
||||
except Exception as r:
|
||||
|
@ -284,8 +320,8 @@ if __name__ == "__main__":
|
|||
tAdapter.init(deployPath, masterIp)
|
||||
tAdapter.stop(force_kill=True)
|
||||
|
||||
if dnodeNums == 1 :
|
||||
tdDnodes.deploy(1,updateCfgDict)
|
||||
if dnodeNums == 1:
|
||||
tdDnodes.deploy(1, updateCfgDict)
|
||||
tdDnodes.start(1)
|
||||
tdCases.logSql(logSql)
|
||||
if restful:
|
||||
|
@ -293,11 +329,11 @@ if __name__ == "__main__":
|
|||
tAdapter.start()
|
||||
|
||||
if queryPolicy != 1:
|
||||
queryPolicy=int(queryPolicy)
|
||||
queryPolicy = int(queryPolicy)
|
||||
if restful:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
else:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("create qnode on dnode 1")
|
||||
|
@ -305,22 +341,29 @@ if __name__ == "__main__":
|
|||
cursor.execute("show local variables")
|
||||
res = cursor.fetchall()
|
||||
for i in range(cursor.rowcount):
|
||||
if res[i][0] == "queryPolicy" :
|
||||
if res[i][0] == "queryPolicy":
|
||||
if int(res[i][1]) == int(queryPolicy):
|
||||
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
|
||||
tdLog.success(
|
||||
f"alter queryPolicy to {queryPolicy} successfully"
|
||||
)
|
||||
else:
|
||||
tdLog.debug(res)
|
||||
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
|
||||
else :
|
||||
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
|
||||
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
|
||||
else:
|
||||
tdLog.debug(
|
||||
"create an cluster with %s nodes and make %s dnode as independent mnode"
|
||||
% (dnodeNums, mnodeNums)
|
||||
)
|
||||
dnodeslist = cluster.configure_cluster(
|
||||
dnodeNums=dnodeNums, mnodeNums=mnodeNums
|
||||
)
|
||||
tdDnodes = ClusterDnodes(dnodeslist)
|
||||
tdDnodes.init(deployPath, masterIp)
|
||||
tdDnodes.setTestCluster(testCluster)
|
||||
tdDnodes.setValgrind(valgrind)
|
||||
tdDnodes.stopAll()
|
||||
for dnode in tdDnodes.dnodes:
|
||||
tdDnodes.deploy(dnode.index,{})
|
||||
tdDnodes.deploy(dnode.index, {})
|
||||
for dnode in tdDnodes.dnodes:
|
||||
tdDnodes.starttaosd(dnode.index)
|
||||
tdCases.logSql(logSql)
|
||||
|
@ -330,26 +373,26 @@ if __name__ == "__main__":
|
|||
tAdapter.start()
|
||||
|
||||
if not restful:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
# tdLog.info(tdDnodes.getSimCfgPath(),host)
|
||||
if createDnodeNums == 1:
|
||||
createDnodeNums=dnodeNums
|
||||
createDnodeNums = dnodeNums
|
||||
else:
|
||||
createDnodeNums=createDnodeNums
|
||||
cluster.create_dnode(conn,createDnodeNums)
|
||||
createDnodeNums = createDnodeNums
|
||||
cluster.create_dnode(conn, createDnodeNums)
|
||||
try:
|
||||
if cluster.check_dnode(conn) :
|
||||
if cluster.check_dnode(conn):
|
||||
print("check dnode ready")
|
||||
except Exception as r:
|
||||
print(r)
|
||||
if queryPolicy != 1:
|
||||
queryPolicy=int(queryPolicy)
|
||||
queryPolicy = int(queryPolicy)
|
||||
if restful:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
else:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("create qnode on dnode 1")
|
||||
|
@ -357,18 +400,20 @@ if __name__ == "__main__":
|
|||
cursor.execute("show local variables")
|
||||
res = cursor.fetchall()
|
||||
for i in range(cursor.rowcount):
|
||||
if res[i][0] == "queryPolicy" :
|
||||
if res[i][0] == "queryPolicy":
|
||||
if int(res[i][1]) == int(queryPolicy):
|
||||
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
|
||||
tdLog.success(
|
||||
f"alter queryPolicy to {queryPolicy} successfully"
|
||||
)
|
||||
else:
|
||||
tdLog.debug(res)
|
||||
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
|
||||
|
||||
if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
|
||||
if ucase is not None and hasattr(ucase, "noConn") and ucase.noConn == True:
|
||||
conn = None
|
||||
else:
|
||||
if not restful:
|
||||
conn = taos.connect(host="%s"%(host), config=tdDnodes.sim.getCfgDir())
|
||||
conn = taos.connect(host="%s" % (host), config=tdDnodes.sim.getCfgDir())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
if is_test_framework:
|
||||
|
@ -382,7 +427,7 @@ if __name__ == "__main__":
|
|||
tdDnodes.setValgrind(valgrind)
|
||||
tdDnodes.stopAll()
|
||||
is_test_framework = 0
|
||||
key_word = 'tdCases.addLinux'
|
||||
key_word = "tdCases.addLinux"
|
||||
try:
|
||||
if key_word in open(fileName).read():
|
||||
is_test_framework = 1
|
||||
|
@ -393,9 +438,9 @@ if __name__ == "__main__":
|
|||
uModule = importlib.import_module(moduleName)
|
||||
try:
|
||||
ucase = uModule.TDTestCase()
|
||||
if (json.dumps(updateCfgDict) == '{}'):
|
||||
if json.dumps(updateCfgDict) == "{}":
|
||||
updateCfgDict = ucase.updatecfgDict
|
||||
if (json.dumps(adapter_cfg_dict) == '{}'):
|
||||
if json.dumps(adapter_cfg_dict) == "{}":
|
||||
adapter_cfg_dict = ucase.taosadapter_cfg_dict
|
||||
except:
|
||||
pass
|
||||
|
@ -404,8 +449,8 @@ if __name__ == "__main__":
|
|||
tAdapter.init(deployPath, masterIp)
|
||||
tAdapter.stop(force_kill=True)
|
||||
|
||||
if dnodeNums == 1 :
|
||||
tdDnodes.deploy(1,updateCfgDict)
|
||||
if dnodeNums == 1:
|
||||
tdDnodes.deploy(1, updateCfgDict)
|
||||
tdDnodes.start(1)
|
||||
tdCases.logSql(logSql)
|
||||
|
||||
|
@ -414,9 +459,9 @@ if __name__ == "__main__":
|
|||
tAdapter.start()
|
||||
|
||||
if queryPolicy != 1:
|
||||
queryPolicy=int(queryPolicy)
|
||||
queryPolicy = int(queryPolicy)
|
||||
if not restful:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
# tdSql.init(conn.cursor())
|
||||
|
@ -437,23 +482,30 @@ if __name__ == "__main__":
|
|||
cursor.execute("show local variables")
|
||||
res = cursor.fetchall()
|
||||
for i in range(cursor.rowcount):
|
||||
if res[i][0] == "queryPolicy" :
|
||||
if res[i][0] == "queryPolicy":
|
||||
if int(res[i][1]) == int(queryPolicy):
|
||||
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
|
||||
tdLog.success(
|
||||
f"alter queryPolicy to {queryPolicy} successfully"
|
||||
)
|
||||
else:
|
||||
tdLog.debug(res)
|
||||
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
|
||||
|
||||
else :
|
||||
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
|
||||
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
|
||||
else:
|
||||
tdLog.debug(
|
||||
"create an cluster with %s nodes and make %s dnode as independent mnode"
|
||||
% (dnodeNums, mnodeNums)
|
||||
)
|
||||
dnodeslist = cluster.configure_cluster(
|
||||
dnodeNums=dnodeNums, mnodeNums=mnodeNums
|
||||
)
|
||||
tdDnodes = ClusterDnodes(dnodeslist)
|
||||
tdDnodes.init(deployPath, masterIp)
|
||||
tdDnodes.setTestCluster(testCluster)
|
||||
tdDnodes.setValgrind(valgrind)
|
||||
tdDnodes.stopAll()
|
||||
for dnode in tdDnodes.dnodes:
|
||||
tdDnodes.deploy(dnode.index,{})
|
||||
tdDnodes.deploy(dnode.index, {})
|
||||
for dnode in tdDnodes.dnodes:
|
||||
tdDnodes.starttaosd(dnode.index)
|
||||
tdCases.logSql(logSql)
|
||||
|
@ -463,27 +515,27 @@ if __name__ == "__main__":
|
|||
tAdapter.start()
|
||||
|
||||
if not restful:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
print(tdDnodes.getSimCfgPath(),host)
|
||||
print(tdDnodes.getSimCfgPath(), host)
|
||||
if createDnodeNums == 1:
|
||||
createDnodeNums=dnodeNums
|
||||
createDnodeNums = dnodeNums
|
||||
else:
|
||||
createDnodeNums=createDnodeNums
|
||||
cluster.create_dnode(conn,createDnodeNums)
|
||||
createDnodeNums = createDnodeNums
|
||||
cluster.create_dnode(conn, createDnodeNums)
|
||||
try:
|
||||
if cluster.check_dnode(conn) :
|
||||
if cluster.check_dnode(conn):
|
||||
print("check dnode ready")
|
||||
except Exception as r:
|
||||
print(r)
|
||||
|
||||
if queryPolicy != 1:
|
||||
queryPolicy=int(queryPolicy)
|
||||
queryPolicy = int(queryPolicy)
|
||||
if restful:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
else:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("create qnode on dnode 1")
|
||||
|
@ -491,14 +543,15 @@ if __name__ == "__main__":
|
|||
cursor.execute("show local variables")
|
||||
res = cursor.fetchall()
|
||||
for i in range(cursor.rowcount):
|
||||
if res[i][0] == "queryPolicy" :
|
||||
if res[i][0] == "queryPolicy":
|
||||
if int(res[i][1]) == int(queryPolicy):
|
||||
tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
|
||||
tdLog.success(
|
||||
f"alter queryPolicy to {queryPolicy} successfully"
|
||||
)
|
||||
else:
|
||||
tdLog.debug(res)
|
||||
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
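Each copy of this check follows the same pattern: read `show local variables`, find the `queryPolicy` row, and compare it with the requested value. A standalone sketch of that pattern, with a hypothetical helper name:

def query_policy_applied(conn, expected):
    # Return True when the client-side queryPolicy variable matches the
    # value the test asked for.
    cursor = conn.cursor()
    cursor.execute("show local variables")
    for row in cursor.fetchall():
        if row[0] == "queryPolicy":
            return int(row[1]) == int(expected)
    return False
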
if testCluster:
|
||||
tdLog.info("Procedures for testing cluster")
|
||||
if fileName == "all":
|
||||
|
@ -508,7 +561,7 @@ if __name__ == "__main__":
|
|||
else:
|
||||
tdLog.info("Procedures for testing self-deployment")
|
||||
if not restful:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
|
||||
|
@ -527,7 +580,7 @@ if __name__ == "__main__":
|
|||
tdDnodes.start(1)
|
||||
time.sleep(1)
|
||||
if not restful:
|
||||
conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
|
||||
conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
|
||||
else:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041")
|
||||
tdLog.info("Procedures for tdengine deployed in %s" % (host))
|
||||
|
|
|
@ -27,11 +27,11 @@ dotnet run --project optsTelnet/optstelnet.csproj
|
|||
taos -s "drop database if exists test"
|
||||
dotnet run --project optsJSON/optsJSON.csproj
|
||||
|
||||
taos -s "create database if exists test"
|
||||
dotnet run --project wsConnect/wsConnect.csproj
|
||||
dotnet run --project wsInsert/wsInsert.csproj
|
||||
dotnet run --project wsStmt/wsStmt.csproj
|
||||
dotnet run --project wsQuery/wsQuery.csproj
|
||||
taos -s "create database if not exists test"
|
||||
# dotnet run --project wsConnect/wsConnect.csproj
|
||||
# dotnet run --project wsInsert/wsInsert.csproj
|
||||
# dotnet run --project wsStmt/wsStmt.csproj
|
||||
# dotnet run --project wsQuery/wsQuery.csproj
|
||||
|
||||
taos -s "drop database if exists test"
|
||||
taos -s "drop database if exists power"
|
|
@ -21,43 +21,52 @@ import argparse
|
|||
import datetime
|
||||
import string
|
||||
from requests.auth import HTTPBasicAuth
|
||||
func_list=['avg','count','twa','sum','stddev','leastsquares','min',
|
||||
'max','first','last','top','bottom','percentile','apercentile',
|
||||
'last_row','diff','spread','distinct']
|
||||
condition_list=[
|
||||
|
||||
func_list = ['abs', 'acos', 'asin', 'atan', 'ceil', 'cos', 'floor', 'log', 'pow', 'round', 'sin', 'sqrt', 'tan',
|
||||
'char_length', 'concat', 'concat_ws', 'length', 'lower', 'ltrim', 'rtrim', 'substr', 'upper',
|
||||
'cast', 'to_iso8601', 'to_json', 'to_unixtimestamp', 'now', 'timediff', 'timetruncate', 'timezone', 'today',
|
||||
'apercentile', 'avg', 'count', 'elapsed', 'leastsquares', 'spread', 'stddev', 'sum', 'hyperloglog', 'histogram', 'percentile',
|
||||
'bottom', 'first', 'interp', 'last', 'last_row', 'max', 'min', 'mode', 'sample', 'tail', 'top', 'unique',
|
||||
'csum', 'derivative', 'diff', 'irate', 'mavg', 'statecount', 'stateduration', 'twa',
|
||||
'database', 'client_version', 'server_version', 'server_status']
|
||||
|
||||
condition_list = [
|
||||
"where _c0 > now -10d ",
|
||||
'interval(10s)',
|
||||
'limit 10',
|
||||
'group by',
|
||||
'partition by',
|
||||
'order by',
|
||||
'fill(null)'
|
||||
|
||||
]
|
||||
where_list = ['_c0>now-10d',' <50','like',' is null','in']
|
||||
|
||||
where_list = ['_c0>now-10d', ' <50', 'like', ' is null', 'in']
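These lists are the raw material for the fuzzer below: each generated statement applies a randomly chosen function to a random column and then appends a random subset of trailing clauses. A tiny illustration of that composition (the table and column names are placeholders, and, like the real generator, the result is not guaranteed to be valid SQL):

import random

def toy_query(table="stb0", column="c1"):
    # One random function, zero to two random trailing clauses.
    func = random.choice(["count", "avg", "min", "max", "last"])
    clauses = random.sample(["interval(10s)", "limit 10", "order by _c0"],
                            k=random.randint(0, 2))
    return f"select {func}({column}) from {table} " + " ".join(clauses)
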
class ConcurrentInquiry:
|
||||
# def __init__(self,ts=1500000001000,host='127.0.0.1',user='root',password='taosdata',dbname='test',
|
||||
# stb_prefix='st',subtb_prefix='t',n_Therads=10,r_Therads=10,probabilities=0.05,loop=5,
|
||||
# stableNum = 2,subtableNum = 1000,insertRows = 100):
|
||||
def __init__(self,ts,host,user,password,dbname,
|
||||
stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop,
|
||||
stableNum ,subtableNum ,insertRows ,mix_table, replay):
|
||||
def __init__(self, ts, host, user, password, dbname,
|
||||
stb_prefix, subtb_prefix, n_Therads, r_Therads, probabilities, loop,
|
||||
stableNum, subtableNum, insertRows, mix_table, replay):
|
||||
self.n_numOfTherads = n_Therads
|
||||
self.r_numOfTherads = r_Therads
|
||||
self.ts=ts
|
||||
self.ts = ts
|
||||
self.host = host
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.dbname=dbname
|
||||
self.dbname = dbname
|
||||
self.stb_prefix = stb_prefix
|
||||
self.subtb_prefix = subtb_prefix
|
||||
self.stb_list=[]
|
||||
self.subtb_list=[]
|
||||
self.stb_stru_list=[]
|
||||
self.subtb_stru_list=[]
|
||||
self.stb_tag_list=[]
|
||||
self.subtb_tag_list=[]
|
||||
self.probabilities = [1-probabilities,probabilities]
|
||||
self.ifjoin = [1,0]
|
||||
self.stb_list = []
|
||||
self.subtb_list = []
|
||||
self.stb_stru_list = []
|
||||
self.subtb_stru_list = []
|
||||
self.stb_tag_list = []
|
||||
self.subtb_tag_list = []
|
||||
self.probabilities = [1-probabilities, probabilities]
|
||||
self.ifjoin = [1, 0]
|
||||
self.loop = loop
|
||||
self.stableNum = stableNum
|
||||
self.subtableNum = subtableNum
|
||||
|
@ -66,252 +75,275 @@ class ConcurrentInquiry:
|
|||
self.max_ts = datetime.datetime.now()
|
||||
self.min_ts = datetime.datetime.now() - datetime.timedelta(days=5)
|
||||
self.replay = replay
|
||||
def SetThreadsNum(self,num):
self.numOfTherads=num

def ret_fcol(self,cl,sql): # return the first column of the result set
def SetThreadsNum(self, num):
self.numOfTherads = num

def ret_fcol(self, cl, sql): # return the first column of the result set
|
||||
cl.execute(sql)
|
||||
fcol_list=[]
|
||||
fcol_list = []
|
||||
for data in cl:
|
||||
fcol_list.append(data[0])
|
||||
return fcol_list
|
||||
|
||||
def r_stb_list(self,cl): #返回超级表列表
|
||||
sql='show '+self.dbname+'.stables'
|
||||
self.stb_list=self.ret_fcol(cl,sql)
|
||||
def r_stb_list(self, cl): # 返回超级表列表
|
||||
sql = 'show '+self.dbname+'.stables'
|
||||
self.stb_list = self.ret_fcol(cl, sql)
|
||||
|
||||
def r_subtb_list(self,cl,stablename): #每个超级表返回2个子表
|
||||
sql='select tbname from '+self.dbname+'.'+stablename+' limit 2;'
|
||||
self.subtb_list+=self.ret_fcol(cl,sql)
|
||||
def r_subtb_list(self, cl, stablename): # 每个超级表返回2个子表
|
||||
sql = 'select tbname from '+self.dbname+'.'+stablename+' limit 2;'
|
||||
self.subtb_list += self.ret_fcol(cl, sql)
|
||||
|
||||
def cal_struct(self,cl,tbname): #查看表结构
|
||||
tb=[]
|
||||
tag=[]
|
||||
sql='describe '+self.dbname+'.'+tbname+';'
|
||||
def cal_struct(self, cl, tbname): # 查看表结构
|
||||
tb = []
|
||||
tag = []
|
||||
sql = 'describe '+self.dbname+'.'+tbname+';'
|
||||
cl.execute(sql)
|
||||
for data in cl:
|
||||
if data[3]:
|
||||
tag.append(data[0])
|
||||
else:
|
||||
tb.append(data[0])
|
||||
return tb,tag
|
||||
return tb, tag
|
||||
|
||||
def r_stb_stru(self,cl): #获取所有超级表的表结构
|
||||
def r_stb_stru(self, cl): # 获取所有超级表的表结构
|
||||
for i in self.stb_list:
|
||||
tb,tag=self.cal_struct(cl,i)
|
||||
tb, tag = self.cal_struct(cl, i)
|
||||
self.stb_stru_list.append(tb)
|
||||
self.stb_tag_list.append(tag)
|
||||
|
||||
def r_subtb_stru(self,cl): #返回所有子表的表结构
|
||||
def r_subtb_stru(self, cl): # 返回所有子表的表结构
|
||||
for i in self.subtb_list:
|
||||
tb,tag=self.cal_struct(cl,i)
|
||||
tb, tag = self.cal_struct(cl, i)
|
||||
self.subtb_stru_list.append(tb)
|
||||
self.subtb_tag_list.append(tag)
|
||||
|
||||
def get_timespan(self,cl): # get the data's time span (first super table only)
sql = 'select first(_c0),last(_c0) from ' + self.dbname + '.' + self.stb_list[0] + ';'
def get_timespan(self, cl): # get the data's time span (first super table only)
sql = 'select first(_c0),last(_c0) from ' + \
self.dbname + '.' + self.stb_list[0] + ';'
|
||||
print(sql)
|
||||
cl.execute(sql)
|
||||
for data in cl:
|
||||
self.max_ts = data[1]
|
||||
self.min_ts = data[0]
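get_timespan stores the earliest and latest timestamps of the first super table so that later where-clauses can be anchored to real data. The same query, reduced to a standalone sketch:

def time_span(cursor, dbname, table):
    # first(_c0)/last(_c0) give the oldest and newest rows' timestamps;
    # _c0 is TDengine's implicit name for the first (timestamp) column.
    cursor.execute(f"select first(_c0), last(_c0) from {dbname}.{table}")
    first_ts, last_ts = cursor.fetchall()[0]
    return first_ts, last_ts
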
def get_full(self): #获取所有的表、表结构
|
||||
def get_full(self): # 获取所有的表、表结构
|
||||
host = self.host
|
||||
user = self.user
|
||||
password = self.password
|
||||
conn = taos.connect(
|
||||
host,
|
||||
user,
|
||||
password,
|
||||
host='%s' % host,
|
||||
user='%s' % user,
|
||||
password='%s' % password,
|
||||
)
|
||||
cl = conn.cursor()
|
||||
self.r_stb_list(cl)
|
||||
for i in self.stb_list:
|
||||
self.r_subtb_list(cl,i)
|
||||
self.r_subtb_list(cl, i)
|
||||
self.r_stb_stru(cl)
|
||||
self.r_subtb_stru(cl)
|
||||
self.get_timespan(cl)
|
||||
cl.close()
|
||||
conn.close()
|
||||
|
||||
#query condition
|
||||
def con_where(self,tlist,col_list,tag_list):
|
||||
l=[]
|
||||
for i in range(random.randint(0,len(tlist))):
|
||||
# query condition
|
||||
def con_where(self, tlist, col_list, tag_list):
|
||||
l = []
|
||||
for i in range(random.randint(0, len(tlist))):
|
||||
c = random.choice(where_list)
|
||||
if c == '_c0>now-10d':
|
||||
rdate = self.min_ts + (self.max_ts - self.min_ts)/10 * random.randint(-11,11)
|
||||
conlist = ' _c0 ' + random.choice(['<','>','>=','<=','<>']) + "'" + str(rdate) + "'"
|
||||
rdate = self.min_ts + \
|
||||
(self.max_ts - self.min_ts)/10 * random.randint(-11, 11)
|
||||
conlist = ' _c0 ' + \
|
||||
random.choice(['<', '>', '>=', '<=', '<>']
|
||||
) + "'" + str(rdate) + "'"
|
||||
if self.random_pick():
|
||||
l.append(conlist)
|
||||
else: l.append(c)
|
||||
else:
|
||||
l.append(c)
|
||||
elif '<50' in c:
|
||||
conlist = ' ' + random.choice(tlist) + random.choice(['<','>','>=','<=','<>']) + str(random.randrange(-100,100))
|
||||
conlist = ' ' + random.choice(tlist) + random.choice(
|
||||
['<', '>', '>=', '<=', '<>']) + str(random.randrange(-100, 100))
|
||||
l.append(conlist)
|
||||
elif 'is null' in c:
|
||||
conlist = ' ' + random.choice(tlist) + random.choice([' is null',' is not null'])
|
||||
conlist = ' ' + \
|
||||
random.choice(tlist) + \
|
||||
random.choice([' is null', ' is not null'])
|
||||
l.append(conlist)
|
||||
elif 'in' in c:
|
||||
in_list = []
|
||||
temp = []
|
||||
for i in range(random.randint(0,100)):
|
||||
temp.append(random.randint(-10000,10000))
|
||||
for i in range(random.randint(0, 100)):
|
||||
temp.append(random.randint(-10000, 10000))
|
||||
temp = (str(i) for i in temp)
|
||||
in_list.append(temp)
|
||||
temp1 = []
|
||||
for i in range(random.randint(0,100)):
|
||||
temp1.append("'" + ''.join(random.sample(string.ascii_letters, random.randint(0,10))) + "'")
|
||||
for i in range(random.randint(0, 100)):
|
||||
temp1.append(
|
||||
"'" + ''.join(random.sample(string.ascii_letters, random.randint(0, 10))) + "'")
|
||||
in_list.append(temp1)
|
||||
in_list.append(['NULL','NULL'])
|
||||
conlist = ' ' + random.choice(tlist) + ' in (' + ','.join(random.choice(in_list)) + ')'
|
||||
in_list.append(['NULL', 'NULL'])
|
||||
conlist = ' ' + \
|
||||
random.choice(tlist) + ' in (' + \
|
||||
','.join(random.choice(in_list)) + ')'
|
||||
l.append(conlist)
|
||||
else:
|
||||
s_all = string.ascii_letters
|
||||
conlist = ' ' + random.choice(tlist) + " like \'%" + random.choice(s_all) + "%\' "
|
||||
conlist = ' ' + \
|
||||
random.choice(tlist) + " like \'%" + \
|
||||
random.choice(s_all) + "%\' "
|
||||
l.append(conlist)
|
||||
return 'where '+random.choice([' and ',' or ']).join(l)
|
||||
return 'where '+random.choice([' and ', ' or ']).join(l)
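Note that con_where joins all of its generated predicates with a single connective chosen once, so one statement uses only `and` or only `or`, never a mix. A much smaller generator in the same spirit (the column names are placeholders):

import random

def tiny_where(columns=("c1", "c2", "t1")):
    # One to three comparison predicates joined by a single connective.
    preds = [
        f"{random.choice(columns)} {random.choice(['<', '>', '<>'])} "
        f"{random.randrange(-100, 100)}"
        for _ in range(random.randint(1, 3))
    ]
    return "where " + random.choice([" and ", " or "]).join(preds)
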
def con_interval(self,tlist,col_list,tag_list):
|
||||
interval = 'interval(' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')'
|
||||
def con_interval(self, tlist, col_list, tag_list):
|
||||
interval = 'interval(' + str(random.randint(0, 20)) + \
|
||||
random.choice(['a', 's', 'd', 'w', 'n', 'y']) + ')'
|
||||
return interval
|
||||
|
||||
def con_limit(self,tlist,col_list,tag_list):
|
||||
rand1 = str(random.randint(0,1000))
|
||||
rand2 = str(random.randint(0,1000))
|
||||
return random.choice(['limit ' + rand1,'limit ' + rand1 + ' offset '+rand2,
|
||||
' slimit ' + rand1,' slimit ' + rand1 + ' offset ' + rand2,'limit '+rand1 + ' slimit '+ rand2,
|
||||
'limit '+ rand1 + ' offset' + rand2 + ' slimit '+ rand1 + ' soffset ' + rand2 ])
|
||||
def con_limit(self, tlist, col_list, tag_list):
|
||||
rand1 = str(random.randint(0, 1000))
|
||||
rand2 = str(random.randint(0, 1000))
|
||||
return random.choice(['limit ' + rand1, 'limit ' + rand1 + ' offset '+rand2,
|
||||
' slimit ' + rand1, ' slimit ' + rand1 + ' offset ' +
|
||||
rand2, 'limit '+rand1 + ' slimit ' + rand2,
|
||||
'limit ' + rand1 + ' offset' + rand2 + ' slimit ' + rand1 + ' soffset ' + rand2])
|
||||
|
||||
def con_fill(self,tlist,col_list,tag_list):
|
||||
return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)'])
|
||||
def con_fill(self, tlist, col_list, tag_list):
|
||||
return random.choice(['fill(null)', 'fill(prev)', 'fill(none)', 'fill(LINEAR)'])
|
||||
|
||||
def con_group(self,tlist,col_list,tag_list):
|
||||
rand_tag = random.randint(0,5)
|
||||
rand_col = random.randint(0,1)
|
||||
def con_group(self, tlist, col_list, tag_list):
|
||||
rand_tag = random.randint(0, 5)
|
||||
rand_col = random.randint(0, 1)
|
||||
if len(tag_list):
|
||||
return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
|
||||
return 'group by '+','.join(random.sample(col_list, rand_col) + random.sample(tag_list, rand_tag))
|
||||
else:
|
||||
return 'group by '+','.join(random.sample(col_list,rand_col))
|
||||
return 'group by '+','.join(random.sample(col_list, rand_col))
|
||||
|
||||
def con_order(self,tlist,col_list,tag_list):
|
||||
def con_order(self, tlist, col_list, tag_list):
|
||||
return 'order by '+random.choice(tlist)
|
||||
|
||||
def con_state_window(self,tlist,col_list,tag_list):
|
||||
def con_state_window(self, tlist, col_list, tag_list):
|
||||
return 'state_window(' + random.choice(tlist + tag_list) + ')'
|
||||
|
||||
def con_session_window(self,tlist,col_list,tag_list):
|
||||
session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')'
|
||||
def con_session_window(self, tlist, col_list, tag_list):
|
||||
session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(
|
||||
random.randint(0, 20)) + random.choice(['a', 's', 'd', 'w', 'n', 'y']) + ')'
|
||||
return session_window
|
||||
|
||||
def gen_subquery_sql(self):
|
||||
subsql ,col_num = self.gen_query_sql(1)
|
||||
subsql, col_num = self.gen_query_sql(1)
|
||||
if col_num == 0:
|
||||
return 0
|
||||
col_list=[]
|
||||
tag_list=[]
|
||||
col_list = []
|
||||
tag_list = []
|
||||
for i in range(col_num):
|
||||
col_list.append("taosd%d"%i)
|
||||
col_list.append("taosd%d" % i)
|
||||
|
||||
tlist=col_list+['abc'] #增加不存在的域'abc',是否会引起新bug
|
||||
con_rand=random.randint(0,len(condition_list))
|
||||
func_rand=random.randint(0,len(func_list))
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
t_rand=random.randint(0,len(tlist))
|
||||
sql='select ' #select
|
||||
tlist = col_list+['abc'] # 增加不存在的域'abc',是否会引起新bug
|
||||
con_rand = random.randint(0, len(condition_list))
|
||||
func_rand = random.randint(0, len(func_list))
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
t_rand = random.randint(0, len(tlist))
|
||||
sql = 'select ' # select
|
||||
random.shuffle(col_list)
|
||||
random.shuffle(func_list)
|
||||
sel_col_list=[]
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
sel_col_list = []
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
loop = 0
|
||||
for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
|
||||
alias = ' as '+ 'sub%d ' % loop
|
||||
for i, j in zip(col_list[0:col_rand], func_list): # 决定每个被查询col的函数
|
||||
alias = ' as ' + 'sub%d ' % loop
|
||||
loop += 1
|
||||
pick_func = ''
|
||||
if j == 'leastsquares':
|
||||
pick_func=j+'('+i+',1,1)'
|
||||
pick_func = j+'('+i+',1,1)'
|
||||
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
|
||||
pick_func=j+'('+i+',1)'
|
||||
pick_func = j+'('+i+',1)'
|
||||
else:
|
||||
pick_func=j+'('+i+')'
|
||||
if bool(random.getrandbits(1)) :
|
||||
pick_func+=alias
|
||||
pick_func = j+'('+i+')'
|
||||
if bool(random.getrandbits(1)):
|
||||
pick_func += alias
|
||||
sel_col_list.append(pick_func)
|
||||
if col_rand == 0:
|
||||
sql = sql + '*'
|
||||
else:
|
||||
sql=sql+','.join(sel_col_list) #select col & func
|
||||
sql = sql + ' from ('+ subsql +') '
|
||||
con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window]
|
||||
sel_con=random.sample(con_func,random.randint(0,len(con_func)))
|
||||
sel_con_list=[]
|
||||
sql = sql+','.join(sel_col_list) # select col & func
|
||||
sql = sql + ' from (' + subsql + ') '
|
||||
con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group,
|
||||
self.con_order, self.con_fill, self.con_state_window, self.con_session_window]
|
||||
sel_con = random.sample(con_func, random.randint(0, len(con_func)))
|
||||
sel_con_list = []
|
||||
for i in sel_con:
|
||||
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
|
||||
sql+=' '.join(sel_con_list) # condition
|
||||
#print(sql)
|
||||
sel_con_list.append(i(tlist, col_list, tag_list)) # 获取对应的条件函数
|
||||
# condition
|
||||
sql += ' '.join(sel_con_list)
|
||||
# print(sql)
|
||||
return sql
|
||||
|
||||
def gen_query_sql(self,subquery=0): #生成查询语句
|
||||
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
|
||||
tbname=''
|
||||
col_list=[]
|
||||
tag_list=[]
|
||||
is_stb=0
|
||||
if tbi>len(self.stb_list) :
|
||||
tbi=tbi-len(self.stb_list)
|
||||
tbname=self.subtb_list[tbi-1]
|
||||
col_list=self.subtb_stru_list[tbi-1]
|
||||
tag_list=self.subtb_tag_list[tbi-1]
|
||||
def gen_query_sql(self, subquery=0): # 生成查询语句
|
||||
tbi = random.randint(0, len(self.subtb_list) +
|
||||
len(self.stb_list)) # 随机决定查询哪张表
|
||||
tbname = ''
|
||||
col_list = []
|
||||
tag_list = []
|
||||
is_stb = 0
|
||||
if tbi > len(self.stb_list):
|
||||
tbi = tbi-len(self.stb_list)
|
||||
tbname = self.subtb_list[tbi-1]
|
||||
col_list = self.subtb_stru_list[tbi-1]
|
||||
tag_list = self.subtb_tag_list[tbi-1]
|
||||
else:
|
||||
tbname=self.stb_list[tbi-1]
|
||||
col_list=self.stb_stru_list[tbi-1]
|
||||
tag_list=self.stb_tag_list[tbi-1]
|
||||
is_stb=1
|
||||
tlist=col_list+tag_list+['abc'] #增加不存在的域'abc',是否会引起新bug
|
||||
con_rand=random.randint(0,len(condition_list))
|
||||
func_rand=random.randint(0,len(func_list))
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
tag_rand=random.randint(0,len(tag_list))
|
||||
t_rand=random.randint(0,len(tlist))
|
||||
sql='select ' #select
|
||||
tbname = self.stb_list[tbi-1]
|
||||
col_list = self.stb_stru_list[tbi-1]
|
||||
tag_list = self.stb_tag_list[tbi-1]
|
||||
is_stb = 1
|
||||
tlist = col_list+tag_list+['abc'] # 增加不存在的域'abc',是否会引起新bug
|
||||
con_rand = random.randint(0, len(condition_list))
|
||||
func_rand = random.randint(0, len(func_list))
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
tag_rand = random.randint(0, len(tag_list))
|
||||
t_rand = random.randint(0, len(tlist))
|
||||
sql = 'select ' # select
|
||||
random.shuffle(col_list)
|
||||
random.shuffle(func_list)
|
||||
sel_col_list=[]
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
sel_col_list = []
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
loop = 0
|
||||
for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
|
||||
alias = ' as '+ 'taos%d ' % loop
|
||||
for i, j in zip(col_list[0:col_rand], func_list): # 决定每个被查询col的函数
|
||||
alias = ' as ' + 'taos%d ' % loop
|
||||
loop += 1
|
||||
pick_func = ''
|
||||
if j == 'leastsquares':
|
||||
pick_func=j+'('+i+',1,1)'
|
||||
pick_func = j+'('+i+',1,1)'
|
||||
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
|
||||
pick_func=j+'('+i+',1)'
|
||||
pick_func = j+'('+i+',1)'
|
||||
else:
|
||||
pick_func=j+'('+i+')'
|
||||
if bool(random.getrandbits(1)) | subquery :
|
||||
pick_func+=alias
|
||||
pick_func = j+'('+i+')'
|
||||
if bool(random.getrandbits(1)) | subquery:
|
||||
pick_func += alias
|
||||
sel_col_list.append(pick_func)
|
||||
if col_rand == 0 & subquery :
|
||||
if col_rand == 0 & subquery:
|
||||
sql = sql + '*'
|
||||
else:
|
||||
sql=sql+','.join(sel_col_list) #select col & func
|
||||
sql = sql+','.join(sel_col_list) # select col & func
|
||||
if self.mix_table == 0:
|
||||
sql = sql + ' from '+random.choice(self.stb_list+self.subtb_list)+' '
|
||||
sql = sql + ' from ' + \
|
||||
random.choice(self.stb_list+self.subtb_list)+' '
|
||||
elif self.mix_table == 1:
|
||||
sql = sql + ' from '+random.choice(self.subtb_list)+' '
|
||||
else:
|
||||
sql = sql + ' from '+random.choice(self.stb_list)+' '
|
||||
con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window]
|
||||
sel_con=random.sample(con_func,random.randint(0,len(con_func)))
|
||||
sel_con_list=[]
|
||||
con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group,
|
||||
self.con_order, self.con_fill, self.con_state_window, self.con_session_window]
|
||||
sel_con = random.sample(con_func, random.randint(0, len(con_func)))
|
||||
sel_con_list = []
|
||||
for i in sel_con:
|
||||
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
|
||||
sql+=' '.join(sel_con_list) # condition
|
||||
#print(sql)
|
||||
return (sql,loop)
|
||||
sel_con_list.append(i(tlist, col_list, tag_list)) # 获取对应的条件函数
|
||||
# condition
|
||||
sql += ' '.join(sel_con_list)
|
||||
# print(sql)
|
||||
return (sql, loop)
|
||||
|
||||
def gen_query_join(self): #生成join查询语句
|
||||
def gen_query_join(self): # 生成join查询语句
|
||||
tbname = []
|
||||
col_list = []
|
||||
tag_list = []
|
||||
|
@ -321,86 +353,103 @@ class ConcurrentInquiry:
|
|||
if self.mix_table == 0:
|
||||
if bool(random.getrandbits(1)):
|
||||
subtable = True
|
||||
tbname = random.sample(self.subtb_list,2)
|
||||
tbname = random.sample(self.subtb_list, 2)
|
||||
for i in tbname:
|
||||
col_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
col_list.append(
|
||||
self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
tag_list.append(
|
||||
self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
col_intersection = list(
|
||||
set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(
|
||||
set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
else:
|
||||
tbname = random.sample(self.stb_list,2)
|
||||
tbname = random.sample(self.stb_list, 2)
|
||||
for i in tbname:
|
||||
col_list.append(self.stb_stru_list[self.stb_list.index(i)])
|
||||
tag_list.append(self.stb_stru_list[self.stb_list.index(i)])
|
||||
col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
col_intersection = list(
|
||||
set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(
|
||||
set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
elif self.mix_table == 1:
|
||||
subtable = True
|
||||
tbname = random.sample(self.subtb_list,2)
|
||||
tbname = random.sample(self.subtb_list, 2)
|
||||
for i in tbname:
|
||||
col_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
|
||||
col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
col_intersection = list(
|
||||
set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(
|
||||
set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
else:
|
||||
tbname = random.sample(self.stb_list,2)
|
||||
tbname = random.sample(self.stb_list, 2)
|
||||
for i in tbname:
|
||||
col_list.append(self.stb_stru_list[self.stb_list.index(i)])
|
||||
tag_list.append(self.stb_stru_list[self.stb_list.index(i)])
|
||||
col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
con_rand=random.randint(0,len(condition_list))
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
tag_rand=random.randint(0,len(tag_list))
|
||||
sql='select ' #select
|
||||
col_intersection = list(
|
||||
set(col_list[0]).intersection(set(col_list[1])))
|
||||
tag_intersection = list(
|
||||
set(tag_list[0]).intersection(set(tag_list[1])))
|
||||
con_rand = random.randint(0, len(condition_list))
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
tag_rand = random.randint(0, len(tag_list))
|
||||
sql = 'select ' # select
|
||||
|
||||
sel_col_tag=[]
|
||||
col_rand=random.randint(0,len(col_list))
|
||||
sel_col_tag = []
|
||||
col_rand = random.randint(0, len(col_list))
|
||||
if bool(random.getrandbits(1)):
|
||||
sql += '*'
|
||||
else:
|
||||
sel_col_tag.append('t1.' + str(random.choice(col_list[0] + tag_list[0])))
|
||||
sel_col_tag.append('t2.' + str(random.choice(col_list[1] + tag_list[1])))
|
||||
sel_col_tag.append(
|
||||
't1.' + str(random.choice(col_list[0] + tag_list[0])))
|
||||
sel_col_tag.append(
|
||||
't2.' + str(random.choice(col_list[1] + tag_list[1])))
|
||||
sel_col_list = []
|
||||
random.shuffle(func_list)
|
||||
if self.random_pick():
|
||||
loop = 0
|
||||
for i,j in zip(sel_col_tag,func_list): #决定每个被查询col的函数
|
||||
alias = ' as '+ 'taos%d ' % loop
|
||||
for i, j in zip(sel_col_tag, func_list): # 决定每个被查询col的函数
|
||||
alias = ' as ' + 'taos%d ' % loop
|
||||
loop += 1
|
||||
pick_func = ''
|
||||
if j == 'leastsquares':
|
||||
pick_func=j+'('+i+',1,1)'
|
||||
pick_func = j+'('+i+',1,1)'
|
||||
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
|
||||
pick_func=j+'('+i+',1)'
|
||||
pick_func = j+'('+i+',1)'
|
||||
else:
|
||||
pick_func=j+'('+i+')'
|
||||
pick_func = j+'('+i+')'
|
||||
if bool(random.getrandbits(1)):
|
||||
pick_func+=alias
|
||||
pick_func += alias
|
||||
sel_col_list.append(pick_func)
|
||||
sql += ','.join(sel_col_list)
|
||||
else:
|
||||
sql += ','.join(sel_col_tag)
|
||||
|
||||
sql = sql + ' from '+ str(tbname[0]) +' t1,' + str(tbname[1]) + ' t2 ' #select col & func
|
||||
sql = sql + ' from ' + \
|
||||
str(tbname[0]) + ' t1,' + str(tbname[1]) + \
|
||||
' t2 ' # select col & func
|
||||
join_section = None
|
||||
temp = None
|
||||
if subtable:
|
||||
temp = random.choices(col_intersection)
|
||||
join_section = temp.pop()
|
||||
sql += 'where t1._c0 = t2._c0 and ' + 't1.' + str(join_section) + '=t2.' + str(join_section)
|
||||
sql += 'where t1._c0 = t2._c0 and ' + 't1.' + \
|
||||
str(join_section) + '=t2.' + str(join_section)
|
||||
else:
|
||||
temp = random.choices(col_intersection+tag_intersection)
|
||||
join_section = temp.pop()
|
||||
sql += 'where t1._c0 = t2._c0 and ' + 't1.' + str(join_section) + '=t2.' + str(join_section)
|
||||
sql += 'where t1._c0 = t2._c0 and ' + 't1.' + \
|
||||
str(join_section) + '=t2.' + str(join_section)
|
||||
return sql
|
||||
|
||||
def random_pick(self):
|
||||
x = random.uniform(0,1)
|
||||
x = random.uniform(0, 1)
|
||||
cumulative_probability = 0.0
|
||||
for item, item_probability in zip(self.ifjoin, self.probabilities):
|
||||
cumulative_probability += item_probability
|
||||
if x < cumulative_probability:break
|
||||
if x < cumulative_probability:
|
||||
break
|
||||
return item
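random_pick walks the cumulative distribution of self.probabilities to decide between a join query (1) and a single-table query (0). The standard-library random.choices call expresses the same draw more compactly; shown here as a sketch with an example weighting:

import random

def weighted_pick(items=(1, 0), weights=(0.95, 0.05)):
    # Equivalent to the cumulative-probability loop above: draw one item
    # according to the given weights.
    return random.choices(items, weights=weights, k=1)[0]
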
def gen_data(self):
|
||||
|
@ -412,52 +461,54 @@ class ConcurrentInquiry:
|
|||
user = self.user
|
||||
password = self.password
|
||||
conn = taos.connect(
|
||||
host,
|
||||
user,
|
||||
password,
|
||||
host='%s' % host,
|
||||
user='%s' % user,
|
||||
password='%s' % password,
|
||||
)
|
||||
cl = conn.cursor()
|
||||
cl.execute("drop database if exists %s;" %self.dbname)
|
||||
cl.execute("create database if not exists %s;" %self.dbname)
|
||||
cl.execute("drop database if exists %s;" % self.dbname)
|
||||
cl.execute("create database if not exists %s;" % self.dbname)
|
||||
cl.execute("use %s" % self.dbname)
|
||||
for k in range(stableNum):
|
||||
sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \
|
||||
sql = "create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \
|
||||
tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20), t11 int unsigned , t12 smallint unsigned , t13 tinyint unsigned , t14 bigint unsigned)" % (self.stb_prefix+str(k))
|
||||
cl.execute(sql)
|
||||
for j in range(subtableNum):
|
||||
if j % 100 == 0:
|
||||
sql = "create table %s using %s tags(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % \
|
||||
(self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k))
|
||||
(self.subtb_prefix+str(k)+'_' +
|
||||
str(j), self.stb_prefix+str(k))
|
||||
else:
|
||||
sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % \
|
||||
(self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j), j%43, j%23 , j%17 , j%3167)
|
||||
(self.subtb_prefix+str(k)+'_'+str(j), self.stb_prefix+str(k), j, j/2.0, j % 41, j %
|
||||
51, j % 53, j*1.0, j % 2, 'taos'+str(j), '涛思'+str(j), j % 43, j % 23, j % 17, j % 3167)
|
||||
print(sql)
|
||||
cl.execute(sql)
|
||||
for i in range(insertRows):
|
||||
if i % 100 == 0 :
|
||||
if i % 100 == 0:
|
||||
ret = cl.execute(
|
||||
"insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" %
|
||||
(self.subtb_prefix+str(k)+'_'+str(j), t0+i))
|
||||
else:
|
||||
ret = cl.execute(
|
||||
"insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" %
|
||||
(self.subtb_prefix+str(k)+'_'+str(j), t0+i, i%100, i/2.0, i%41, i%51, i%53, i*1.0, i%2,'taos'+str(i),'涛思'+str(i), i%43, i%23 , i%17 , i%3167))
|
||||
(self.subtb_prefix+str(k)+'_'+str(j), t0+i, i % 100, i/2.0, i % 41, i % 51, i % 53, i*1.0, i % 2, 'taos'+str(i), '涛思'+str(i), i % 43, i % 23, i % 17, i % 3167))
|
||||
cl.close()
|
||||
conn.close()
|
||||
|
||||
def rest_query(self,sql): # REST interface
def rest_query(self, sql): # REST interface
|
||||
host = self.host
|
||||
user = self.user
|
||||
password = self.password
|
||||
port =6041
|
||||
url = "http://{}:{}/rest/sql".format(host, port )
|
||||
port = 6041
|
||||
url = "http://{}:{}/rest/sql".format(host, port)
|
||||
try:
|
||||
r = requests.post(url,
|
||||
data = 'use %s' % self.dbname,
|
||||
auth = HTTPBasicAuth('root', 'taosdata'))
|
||||
data='use %s' % self.dbname,
|
||||
auth=HTTPBasicAuth('root', 'taosdata'))
|
||||
r = requests.post(url,
|
||||
data = sql,
|
||||
auth = HTTPBasicAuth('root', 'taosdata'))
|
||||
data=sql,
|
||||
auth=HTTPBasicAuth('root', 'taosdata'))
|
||||
except:
|
||||
print("REST API Failure (TODO: more info here)")
|
||||
raise
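rest_query sends the generated statements through taosAdapter's REST endpoint rather than the native connector. A condensed sketch of one such call, assuming taosAdapter on port 6041 and the default root/taosdata credentials:

import requests
from requests.auth import HTTPBasicAuth

def rest_sql(host, sql, db=None):
    # POST the SQL text to /rest/sql (or /rest/sql/<db>) and return the
    # decoded JSON body; HTTP-level errors raise immediately.
    url = f"http://{host}:6041/rest/sql" + (f"/{db}" if db else "")
    resp = requests.post(url, data=sql,
                         auth=HTTPBasicAuth("root", "taosdata"), timeout=10)
    resp.raise_for_status()
    return resp.json()
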
@ -481,19 +532,18 @@ class ConcurrentInquiry:
|
|||
nRows = rj['rows'] if ('rows' in rj) else 0
|
||||
return nRows
|
||||
|
||||
|
||||
def query_thread_n(self,threadID): #使用原生python接口查询
|
||||
def query_thread_n(self, threadID): # 使用原生python接口查询
|
||||
host = self.host
|
||||
user = self.user
|
||||
password = self.password
|
||||
conn = taos.connect(
|
||||
host,
|
||||
user,
|
||||
password,
|
||||
host='%s' % host,
|
||||
user='%s' % user,
|
||||
password='%s' % password,
|
||||
)
|
||||
cl = conn.cursor()
|
||||
cl.execute("use %s;" % self.dbname)
|
||||
fo = open('bak_sql_n_%d'%threadID,'w+')
|
||||
fo = open('bak_sql_n_%d' % threadID, 'w+')
|
||||
print("Thread %d: starting" % threadID)
|
||||
loop = self.loop
|
||||
while loop:
|
||||
|
@ -501,145 +551,152 @@ class ConcurrentInquiry:
|
|||
try:
|
||||
if self.random_pick():
|
||||
if self.random_pick():
|
||||
sql,temp=self.gen_query_sql()
|
||||
sql, temp = self.gen_query_sql()
|
||||
else:
|
||||
sql = self.gen_subquery_sql()
|
||||
else:
|
||||
sql = self.gen_query_join()
|
||||
print("sql is ",sql)
|
||||
print("sql is ", sql)
|
||||
fo.write(sql+'\n')
|
||||
start = time.time()
|
||||
cl.execute(sql)
|
||||
cl.fetchall()
|
||||
end = time.time()
|
||||
print("time cost :",end-start)
|
||||
print("time cost :", end-start)
|
||||
except Exception as e:
|
||||
print('-'*40)
|
||||
print(
|
||||
"Failure thread%d, sql: %s \nexception: %s" %
|
||||
(threadID, str(sql),str(e)))
|
||||
err_uec='Unable to establish connection'
|
||||
if err_uec in str(e) and loop >0:
|
||||
(threadID, str(sql), str(e)))
|
||||
err_uec = 'Unable to establish connection'
|
||||
if err_uec in str(e) and loop > 0:
|
||||
exit(-1)
|
||||
loop -= 1
|
||||
if loop == 0: break
|
||||
if loop == 0:
|
||||
break
|
||||
fo.close()
|
||||
cl.close()
|
||||
conn.close()
|
||||
print("Thread %d: finishing" % threadID)
|
||||
|
||||
def query_thread_nr(self,threadID): #使用原生python接口进行重放
|
||||
def query_thread_nr(self, threadID): # 使用原生python接口进行重放
|
||||
host = self.host
|
||||
user = self.user
|
||||
password = self.password
|
||||
conn = taos.connect(
|
||||
host,
|
||||
user,
|
||||
password,
|
||||
host='%s' % host,
|
||||
user='%s' % user,
|
||||
password='%s' % password,
|
||||
)
|
||||
cl = conn.cursor()
|
||||
cl.execute("use %s;" % self.dbname)
|
||||
replay_sql = []
|
||||
with open('bak_sql_n_%d'%threadID,'r') as f:
|
||||
with open('bak_sql_n_%d' % threadID, 'r') as f:
|
||||
replay_sql = f.readlines()
|
||||
print("Replay Thread %d: starting" % threadID)
|
||||
for sql in replay_sql:
|
||||
try:
|
||||
print("sql is ",sql)
|
||||
print("sql is ", sql)
|
||||
start = time.time()
|
||||
cl.execute(sql)
|
||||
cl.fetchall()
|
||||
end = time.time()
|
||||
print("time cost :",end-start)
|
||||
print("time cost :", end-start)
|
||||
except Exception as e:
|
||||
print('-'*40)
|
||||
print(
|
||||
"Failure thread%d, sql: %s \nexception: %s" %
|
||||
(threadID, str(sql),str(e)))
|
||||
err_uec='Unable to establish connection'
|
||||
if err_uec in str(e) and loop >0:
|
||||
(threadID, str(sql), str(e)))
|
||||
err_uec = 'Unable to establish connection'
|
||||
if err_uec in str(e) and loop > 0:
|
||||
exit(-1)
|
||||
cl.close()
|
||||
conn.close()
|
||||
print("Replay Thread %d: finishing" % threadID)
|
||||
|
||||
def query_thread_r(self,threadID): #使用rest接口查询
|
||||
def query_thread_r(self, threadID): # 使用rest接口查询
|
||||
print("Thread %d: starting" % threadID)
|
||||
fo = open('bak_sql_r_%d'%threadID,'w+')
|
||||
fo = open('bak_sql_r_%d' % threadID, 'w+')
|
||||
loop = self.loop
|
||||
while loop:
|
||||
try:
|
||||
if self.random_pick():
|
||||
if self.random_pick():
|
||||
sql,temp=self.gen_query_sql()
|
||||
sql, temp = self.gen_query_sql()
|
||||
else:
|
||||
sql = self.gen_subquery_sql()
|
||||
else:
|
||||
sql = self.gen_query_join()
|
||||
print("sql is ",sql)
|
||||
print("sql is ", sql)
|
||||
fo.write(sql+'\n')
|
||||
start = time.time()
|
||||
self.rest_query(sql)
|
||||
end = time.time()
|
||||
print("time cost :",end-start)
|
||||
print("time cost :", end-start)
|
||||
except Exception as e:
|
||||
print('-'*40)
|
||||
print(
|
||||
"Failure thread%d, sql: %s \nexception: %s" %
|
||||
(threadID, str(sql),str(e)))
|
||||
err_uec='Unable to establish connection'
|
||||
if err_uec in str(e) and loop >0:
|
||||
(threadID, str(sql), str(e)))
|
||||
err_uec = 'Unable to establish connection'
|
||||
if err_uec in str(e) and loop > 0:
|
||||
exit(-1)
|
||||
loop -= 1
|
||||
if loop == 0: break
|
||||
if loop == 0:
|
||||
break
|
||||
fo.close()
|
||||
print("Thread %d: finishing" % threadID)
|
||||
|
||||
def query_thread_rr(self,threadID): #使用rest接口重放
|
||||
def query_thread_rr(self, threadID): # 使用rest接口重放
|
||||
print("Replay Thread %d: starting" % threadID)
|
||||
replay_sql = []
|
||||
with open('bak_sql_r_%d'%threadID,'r') as f:
|
||||
with open('bak_sql_r_%d' % threadID, 'r') as f:
|
||||
replay_sql = f.readlines()
|
||||
|
||||
for sql in replay_sql:
|
||||
try:
|
||||
print("sql is ",sql)
|
||||
print("sql is ", sql)
|
||||
start = time.time()
|
||||
self.rest_query(sql)
|
||||
end = time.time()
|
||||
print("time cost :",end-start)
|
||||
print("time cost :", end-start)
|
||||
except Exception as e:
|
||||
print('-'*40)
|
||||
print(
|
||||
"Failure thread%d, sql: %s \nexception: %s" %
|
||||
(threadID, str(sql),str(e)))
|
||||
err_uec='Unable to establish connection'
|
||||
if err_uec in str(e) and loop >0:
|
||||
(threadID, str(sql), str(e)))
|
||||
err_uec = 'Unable to establish connection'
|
||||
if err_uec in str(e) and loop > 0:
|
||||
exit(-1)
|
||||
print("Replay Thread %d: finishing" % threadID)
|
||||
|
||||
def run(self):
|
||||
print(self.n_numOfTherads,self.r_numOfTherads)
|
||||
print(self.n_numOfTherads, self.r_numOfTherads)
|
||||
threads = []
|
||||
if self.replay: #whether replay
|
||||
if self.replay: # whether replay
|
||||
for i in range(self.n_numOfTherads):
|
||||
thread = threading.Thread(target=self.query_thread_nr, args=(i,))
|
||||
thread = threading.Thread(
|
||||
target=self.query_thread_nr, args=(i,))
|
||||
threads.append(thread)
|
||||
thread.start()
|
||||
for i in range(self.r_numOfTherads):
|
||||
thread = threading.Thread(target=self.query_thread_rr, args=(i,))
|
||||
thread = threading.Thread(
|
||||
target=self.query_thread_rr, args=(i,))
|
||||
threads.append(thread)
|
||||
thread.start()
|
||||
else:
|
||||
for i in range(self.n_numOfTherads):
|
||||
thread = threading.Thread(target=self.query_thread_n, args=(i,))
|
||||
thread = threading.Thread(
|
||||
target=self.query_thread_n, args=(i,))
|
||||
threads.append(thread)
|
||||
thread.start()
|
||||
for i in range(self.r_numOfTherads):
|
||||
thread = threading.Thread(target=self.query_thread_r, args=(i,))
|
||||
thread = threading.Thread(
|
||||
target=self.query_thread_r, args=(i,))
|
||||
threads.append(thread)
|
||||
thread.start()
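run() starts the native and REST worker threads and returns immediately without joining them. If the caller needs to block until every worker has finished, a join loop such as the sketch below (not part of the original class) would do it:

import threading

def run_and_wait(workers):
    # workers: zero-argument callables; run each in its own thread and
    # block until all of them have finished.
    threads = [threading.Thread(target=w) for w in workers]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
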
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'-H',
|
||||
|
@ -729,9 +786,9 @@ parser.add_argument(
|
|||
'-w',
|
||||
'--password',
|
||||
action='store',
|
||||
default='root',
|
||||
default='taosdata',
|
||||
type=str,
|
||||
help='user name')
|
||||
help='password')
|
||||
parser.add_argument(
|
||||
'-n',
|
||||
'--number-of-tables',
|
||||
|
@ -763,15 +820,14 @@ parser.add_argument(
|
|||
|
||||
args = parser.parse_args()
|
||||
q = ConcurrentInquiry(
|
||||
args.ts,args.host_name,args.user,args.password,args.db_name,
|
||||
args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads,
|
||||
args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records,
|
||||
args.mix_stable_subtable, args.replay )
|
||||
args.ts, args.host_name, args.user, args.password, args.db_name,
|
||||
args.stb_name_prefix, args.subtb_name_prefix, args.number_of_native_threads, args.number_of_rest_threads,
|
||||
args.probabilities, args.loop_per_thread, args.number_of_stables, args.number_of_tables, args.number_of_records,
|
||||
args.mix_stable_subtable, args.replay)
|
||||
|
||||
if args.create_table:
|
||||
q.gen_data()
|
||||
q.get_full()
|
||||
|
||||
#q.gen_query_sql()
|
||||
# q.gen_query_sql()
|
||||
q.run()
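The argparse block wires every flag into the ConcurrentInquiry constructor in the positional order shown above. For quick experiments the class can also be driven directly; the values below are illustrative defaults (mirroring the commented-out defaults near the constructor), not ones the script mandates:

# Hypothetical direct invocation, bypassing argparse.
q = ConcurrentInquiry(
    1500000001000, "127.0.0.1", "root", "taosdata", "test",
    "st", "t", 10, 10,          # prefixes, native/REST thread counts
    0.05, 5, 2, 1000, 100,      # probabilities, loop, stables, subtables, rows
    0, False)                   # mix_table, replay
q.gen_data()   # create and populate the test database
q.get_full()   # cache table and column metadata
q.run()        # start the query threads
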
@ -36,7 +36,7 @@ class ConfigureyCluster:
|
|||
self.portStep = 100
|
||||
self.mnodeNums = 0
|
||||
|
||||
def configure_cluster(self ,dnodeNums=5,mnodeNums=0,startPort=6030,portStep=100,hostname="%s"%hostname):
|
||||
def configure_cluster(self ,dnodeNums=5,mnodeNums=0,independentMnode=True,startPort=6030,portStep=100,hostname="%s"%hostname):
|
||||
self.startPort=int(startPort)
|
||||
self.portStep=int(portStep)
|
||||
self.hostname=hostname
|
||||
|
@ -52,7 +52,7 @@ class ConfigureyCluster:
|
|||
dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
|
||||
|
||||
# configure dnode of independent mnodes
|
||||
if num <= self.mnodeNums and self.mnodeNums != 0 :
|
||||
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
|
||||
dnode.addExtraCfg("supportVnodes", 0)
|
||||
# print(dnode)
|
||||
self.dnodes.append(dnode)
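With the new independentMnode parameter, the first mnodeNums dnodes are pinned to mnode-only duty (supportVnodes set to 0) only when the flag is left at its default of True. The decisive branch, reduced to a sketch (dnode stands for the framework's dnode object):

def pin_mnode_dnode(dnode, num, mnodeNums, independentMnode=True):
    # Mirror of the condition above: only an independent mnode dnode is
    # stripped of vnode capacity.
    if num <= mnodeNums and mnodeNums != 0 and independentMnode:
        dnode.addExtraCfg("supportVnodes", 0)
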
@ -67,6 +67,13 @@ class ConfigureyCluster:
|
|||
tdSql.execute(" create dnode '%s';"%dnode_id)
|
||||
|
||||
|
||||
def create_mnode(self,conn,mnodeNums):
|
||||
tdSql.init(conn.cursor())
|
||||
mnodeNums=int(mnodeNums)
|
||||
for i in range(2,mnodeNums+1):
|
||||
tdSql.execute(" create mnode on dnode %d;"%i)
|
||||
|
||||
|
||||
|
||||
def check_dnode(self,conn):
|
||||
tdSql.init(conn.cursor())
|
||||
|
|
|
@ -418,7 +418,7 @@ class TDDnode:
|
|||
if i > 50:
|
||||
break
|
||||
with open(logFile) as f:
|
||||
timeout = time.time() + 60 * 2
|
||||
timeout = time.time() + 10 * 2
|
||||
while True:
|
||||
line = f.readline().encode('utf-8')
|
||||
if bkey in line:
|
||||
|
|
|
@ -231,7 +231,7 @@
|
|||
|
||||
# ---- stream
|
||||
./test.sh -f tsim/stream/basic0.sim -g
|
||||
./test.sh -f tsim/stream/basic1.sim
|
||||
# TD-20201 ./test.sh -f tsim/stream/basic1.sim
|
||||
./test.sh -f tsim/stream/basic2.sim
|
||||
./test.sh -f tsim/stream/drop_stream.sim
|
||||
./test.sh -f tsim/stream/fillHistoryBasic1.sim
|
||||
|
|
|
@ -139,7 +139,7 @@ echo "fsDebugFlag 143" >> $TAOS_CFG
|
|||
echo "idxDebugFlag 143" >> $TAOS_CFG
|
||||
echo "udfDebugFlag 143" >> $TAOS_CFG
|
||||
echo "smaDebugFlag 143" >> $TAOS_CFG
|
||||
echo "idxDebugFlag 143" >> $TAOS_CFG
|
||||
echo "metaDebugFlag 143" >> $TAOS_CFG
|
||||
echo "numOfLogLines 20000000" >> $TAOS_CFG
|
||||
echo "asyncLog 0" >> $TAOS_CFG
|
||||
echo "locale en_US.UTF-8" >> $TAOS_CFG
|
||||
|
|
|
@ -0,0 +1,76 @@
|
|||
{
|
||||
"filetype": "insert",
|
||||
"cfgdir": "/etc/taos",
|
||||
"host": "localhost",
|
||||
"port": 6030,
|
||||
"rest_port": 6041,
|
||||
"user": "root",
|
||||
"password": "taosdata",
|
||||
"thread_count": 100,
|
||||
"create_table_thread_count": 24,
|
||||
"result_file": "taosBenchmark_result.log",
|
||||
"confirm_parameter_prompt": "no",
|
||||
"insert_interval": 0,
|
||||
"num_of_records_per_req": 1000000,
|
||||
"max_sql_len": 1024000,
|
||||
"databases": [
|
||||
{
|
||||
"dbinfo": {
|
||||
"name": "db4096",
|
||||
"drop": "yes",
|
||||
"replica": 1,
|
||||
"duration": 10,
|
||||
"precision": "ms",
|
||||
"keep": 3650,
|
||||
"comp": 2,
|
||||
"vgroups": 2,
|
||||
"buffer": 1000
|
||||
},
|
||||
"super_tables": [
|
||||
{
|
||||
"name": "stb0",
|
||||
"child_table_exists": "no",
|
||||
"childtable_count": 5,
|
||||
"childtable_prefix": "ctb0",
|
||||
"escape_character": "no",
|
||||
"auto_create_table": "no",
|
||||
"batch_create_tbl_num": 500,
|
||||
"data_source": "rand",
|
||||
"insert_mode": "taosc",
|
||||
"rollup": null,
|
||||
"interlace_rows": 0,
|
||||
"line_protocol": null,
|
||||
"tcp_transfer": "no",
|
||||
"insert_rows": 10000,
|
||||
"childtable_limit": 0,
|
||||
"childtable_offset": 0,
|
||||
"rows_per_tbl": 0,
|
||||
"max_sql_len": 1048576,
|
||||
"disorder_ratio": 0,
|
||||
"disorder_range": 1000,
|
||||
"timestamp_step": 1000,
|
||||
"start_timestamp": "2022-10-22 17:20:36",
|
||||
"sample_format": "csv",
|
||||
"sample_file": "./sample.csv",
|
||||
"tags_file": "",
|
||||
"columns": [
|
||||
{
|
||||
"type": "INT",
|
||||
"count": 4094
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"type": "TINYINT",
|
||||
"count": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"prepare_rand": 10000,
|
||||
"chinese": "no",
|
||||
"streams": false,
|
||||
"test_log": "/root/testlog/"
|
||||
}
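This configuration (presumably the 0-others/compa4096.json used by the compatibility test further down) asks taosBenchmark to build one super table with 4094 INT columns plus a single TINYINT tag, five child tables of 10000 rows each. A hedged sketch of launching it from Python, mirroring the os.system call in that test:

import os
import subprocess

# Paths follow the test's own invocation; adjust if taosBenchmark or the
# client library live elsewhere on your system.
subprocess.run(
    ["taosBenchmark", "-f", "0-others/compa4096.json", "-y"],
    env={**os.environ, "LD_LIBRARY_PATH": "/usr/lib"},
    check=True,
)
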
@ -93,6 +93,7 @@ class TDTestCase:
|
|||
oldServerVersion=tdsqlF.queryResult[0][0]
|
||||
tdLog.info(f"Base server version is {oldServerVersion}")
|
||||
tdsqlF.query(f"SELECT CLIENT_VERSION();")
|
||||
|
||||
# the oldClientVersion can't be updated in the same python process, so the version is the newly compiled version
|
||||
oldClientVersion=tdsqlF.queryResult[0][0]
|
||||
tdLog.info(f"Base client version is {oldClientVersion}")
|
||||
|
@ -105,7 +106,16 @@ class TDTestCase:
|
|||
# tdsqlF.query(f"select count(*) from {stb}")
|
||||
# tdsqlF.checkData(0,0,tableNumbers*recordNumbers1)
|
||||
os.system("pkill taosd")
|
||||
sleep(1)
|
||||
sleep(2)
|
||||
|
||||
print(f"start taosd: nohup taosd -c {cPath} & ")
|
||||
os.system(f" nohup taosd -c {cPath} & " )
|
||||
sleep(10)
|
||||
tdLog.info(" LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y ")
|
||||
os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y")
|
||||
os.system("pkill -9 taosd")
|
||||
|
||||
|
||||
|
||||
tdLog.printNoPrefix("==========step2:update new version ")
|
||||
self.buildTaosd(bPath)
|
||||
|
|
|
@ -91,7 +91,7 @@ class TDTestCase:
|
|||
break
|
||||
else:
|
||||
tdLog.info( "wait start taosd ,times: %d "%i)
|
||||
sleep
|
||||
time.sleep(1)
|
||||
i+= 1
|
||||
else :
|
||||
tdLog.exit("taosd %s is not running "%startAction)
|
||||
|
|
|
@ -126,14 +126,12 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@ -126,14 +126,11 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@ -126,14 +126,11 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@ -126,14 +126,11 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@ -120,14 +120,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
@ -199,7 +195,7 @@ class TDTestCase:
|
|||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=10,stopRole='dnode')
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=4,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
|
@ -94,14 +94,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@ -124,14 +124,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
#check mnode status
|
||||
tdLog.info("check mnode status")
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
|
|
|
@@ -95,16 +95,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdLog.info("create mnode on dnode 2")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdLog.info("create mnode on dnode 3")
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -94,14 +94,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -119,14 +119,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -96,14 +96,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -119,14 +119,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -75,14 +75,17 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# # fisr add three mnodes;
# tdLog.info("check mnode status")
# # tdSql.execute("create mnode on dnode 2")
# clusterComCheck.checkMnodeStatus(2)
# # tdSql.execute("create mnode on dnode 3")
# clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -75,14 +75,9 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -75,14 +75,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -75,14 +75,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -75,14 +75,10 @@ class TDTestCase:
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(1)

# fisr add three mnodes;
tdLog.info("fisr add three mnodes and check mnode status")
tdSql.execute("create mnode on dnode 2")
clusterComCheck.checkMnodeStatus(2)
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
#check mnode status
tdLog.info("check mnode status")
clusterComCheck.checkMnodeStatus(mnodeNums)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
@@ -16,8 +16,11 @@ from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from util.common import *
sys.path.append("./6-cluster")
sys.path.append("./7-tmq")
from tmqCommon import *
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

class TDTestCase:
def __init__(self):

@@ -26,6 +29,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 100
self.dnodeNumbers = 5

def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
@@ -119,15 +123,19 @@ class TDTestCase:
tdLog.info("================= restart dnode 2===========================")
cluster.dnodes[1].stoptaosd()
cluster.dnodes[1].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 3===========================")
cluster.dnodes[2].stoptaosd()
cluster.dnodes[2].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
cluster.dnodes[3].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)

pThread.join()
# tdLog.info("restart taosd to ensure that the data falls into the disk")
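The block above restarts dnodes 2 through 5 one at a time and waits for the cluster to settle after each restart; the same pattern as a loop, assuming it runs inside the same test method (cluster.dnodes is zero-indexed, as in the hunk):

# restart dnodes 2..5 (indexes 1..4) and wait for all dnodes to come back ready
for idx in range(1, 5):
    tdLog.info("================= restart dnode %d===========================" % (idx + 1))
    cluster.dnodes[idx].stoptaosd()
    cluster.dnodes[idx].starttaosd()
    clusterComCheck.checkDnodes(self.dnodeNumbers)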
@@ -193,9 +201,9 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()

tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
tdLog.info("act consume rows: %d, act query rows: %d "%(totalConsumeRows, totalRowsFromQuery))

if totalConsumeRows != totalRowsFromQuery:
if totalConsumeRows < totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")

# tmqCom.checkFileContent(consumerId, queryString)
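Note the relaxed comparison: the check changes from != to <, presumably because consuming while dnodes are being restarted can re-deliver messages, so receiving more rows than the query returns is tolerated and only under-consumption fails the case. Condensed, the check after the change reads:

if totalConsumeRows < totalRowsFromQuery:
    # fewer consumed rows than queried rows means data was lost; duplicates are accepted
    tdLog.exit("tmq consume rows error!")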
@@ -224,7 +232,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 15,
'pollDelay': 30,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}

@@ -240,6 +248,9 @@ class TDTestCase:
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)

tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()

consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@@ -257,15 +268,19 @@ class TDTestCase:
tdLog.info("================= restart dnode 2===========================")
cluster.dnodes[1].stoptaosd()
cluster.dnodes[1].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 3===========================")
cluster.dnodes[2].stoptaosd()
cluster.dnodes[2].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
cluster.dnodes[3].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
clusterComCheck.checkDnodes(self.dnodeNumbers)

tdLog.info("start to check consume result")
expectRows = 1
@@ -274,12 +289,9 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]

tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d "%(totalConsumeRows, totalRowsFromQuery))

tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))

if totalConsumeRows != totalRowsFromQuery:
if totalConsumeRows < totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")

# tmqCom.checkFileContent(consumerId, queryString)

@@ -290,8 +302,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 2 end ...... ")

def run(self):
self.prepareTestEnv()
self.tmqCase1()
#self.prepareTestEnv()
#self.tmqCase1()
self.prepareTestEnv()
self.tmqCase2()
@@ -209,7 +209,7 @@ python3 ./test.py -f 2-query/varchar.py -R
python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/tb_100w_data_order.py

python3 ./test.py -f 1-insert/delete_data.py
# TD-20200 python3 ./test.py -f 1-insert/delete_data.py
python3 ./test.py -f 1-insert/keep_expired.py

python3 ./test.py -f 2-query/join2.py

@@ -232,12 +232,14 @@ python3 ./test.py -f 2-query/function_diff.py
python3 ./test.py -f 2-query/queryQnode.py

python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5
python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3 -i False
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 -i False
python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 6 -M 3 -n 3
# TD-20198 python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 6 -M 3 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 6 -M 3 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 6 -M 3
@@ -73,8 +73,9 @@ if __name__ == "__main__":
createDnodeNums = 1
restful = False
replicaVar = 1
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar'])
independentMnode = True
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar','independentMnode'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(

@@ -97,6 +98,8 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-R restful realization form')
tdLog.printNoPrefix('-D taosadapter update cfg dict ')
tdLog.printNoPrefix('-n the number of replicas')
tdLog.printNoPrefix('-i independentMnode Mnode')

sys.exit(0)

if key in ['-r', '--restart']:

@@ -158,6 +161,9 @@ if __name__ == "__main__":
if key in ['-C', '--createDnodeNums']:
createDnodeNums = value

if key in ['-i', '--independentMnode']:
independentMnode = value

if key in ['-R', '--restful']:
restful = True
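getopt hands option values over as strings, so "-i False" on the command line (as used in the 6-cluster entries above) leaves independentMnode holding the text "False", which is truthy in Python; a hedged normalization sketch, in case the downstream configure_cluster call expects a real boolean (not part of the commit):

if key in ['-i', '--independentMnode']:
    # getopt option values are strings; treat "false"/"0"/"no" (any case) as False
    independentMnode = value.strip().lower() not in ('false', '0', 'no')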
@@ -313,7 +319,7 @@ if __name__ == "__main__":
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
else :
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdDnodes = ClusterDnodes(dnodeslist)
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)

@@ -339,6 +345,7 @@ if __name__ == "__main__":
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
cluster.create_mnode(conn,mnodeNums)
try:
if cluster.check_dnode(conn) :
print("check dnode ready")

@@ -446,7 +453,7 @@ if __name__ == "__main__":

else :
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
tdDnodes = ClusterDnodes(dnodeslist)
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)

@@ -472,6 +479,8 @@ if __name__ == "__main__":
else:
createDnodeNums=createDnodeNums
cluster.create_dnode(conn,createDnodeNums)
cluster.create_mnode(conn,mnodeNums)

try:
if cluster.check_dnode(conn) :
print("check dnode ready")