diff --git a/.drone.yml b/.drone.yml
index 4b14bce0b8..085a07acf9 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -23,6 +23,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: test_arm64_bionic
@@ -150,6 +151,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_trusty
@@ -176,6 +178,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_xenial
@@ -201,7 +204,7 @@ steps:
branch:
- develop
- master
-
+ - 2.0
---
kind: pipeline
name: build_bionic
@@ -226,6 +229,7 @@ steps:
branch:
- develop
- master
+ - 2.0
---
kind: pipeline
name: build_centos7
@@ -249,4 +253,4 @@ steps:
branch:
- develop
- master
-
+ - 2.0
\ No newline at end of file
diff --git a/cmake/version.inc b/cmake/version.inc
index ffceecf492..148c33106a 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.1.6.0")
+ SET(TD_VER_NUMBER "2.1.7.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/MsvcLibX/src/iconv.c b/deps/MsvcLibX/src/iconv.c
index 40b6e6462d..1ec0dc7354 100644
--- a/deps/MsvcLibX/src/iconv.c
+++ b/deps/MsvcLibX/src/iconv.c
@@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes;
char *pBuf;
+ char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes);
if (!pBuf) {
@@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf);
return NULL;
}
- pBuf = realloc(pBuf, nBytes+1);
- return pBuf;
+ pBuf1 = realloc(pBuf, nBytes+1);
+ if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
+ return pBuf1;
}
int CountCharacters(const char *string, UINT cp) {
diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c
index f366b081ad..85f4c83f24 100644
--- a/deps/MsvcLibX/src/main.c
+++ b/deps/MsvcLibX/src/main.c
@@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0;
char **ppszArg;
+ char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE;
ppszArg[argc++] = pszCopy+j;
- ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
+ if(ppszArg1 == NULL && ppszArg != NULL)
+ free(ppszArg);
+ ppszArg = ppszArg1;
if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0';
}
@@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0';
}
- realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
+ //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */
diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c
index 5fbcf773a2..e2ba755f2d 100644
--- a/deps/MsvcLibX/src/realpath.c
+++ b/deps/MsvcLibX/src/realpath.c
@@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
int iErr;
const char *pc;
@@ -242,8 +243,11 @@ realpath_failed:
return NULL;
}
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
- return pOutbuf;
+  if (!outbuf) {
+    pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+    if (pOutbuf1 == NULL) { free(pOutbuf); return NULL; }
+    pOutbuf = pOutbuf1; }
+  return pOutbuf;
}
#endif
@@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
+ char *pOutbuf1 = NULL;
char *pPath1 = NULL;
char *pPath2 = NULL;
int iErr;
@@ -590,10 +595,13 @@ realpathU_failed:
}
DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
- if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
+  if (!outbuf) {
+    pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
+    if (pOutbuf1 == NULL) { free(pOutbuf); pOutbuf = NULL; }
+    else pOutbuf = pOutbuf1; }
   free(pPath1);
   free(pPath2);
-  return pOutbuf;
+  return pOutbuf;
}
#endif /* defined(_WIN32) */
diff --git a/deps/TSZ b/deps/TSZ
index 0ca5b15a8e..ceda5bf9fc 160000
--- a/deps/TSZ
+++ b/deps/TSZ
@@ -1 +1 @@
-Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
+Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5
diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md
index 2cc6033ccc..f5af3a4b8d 100644
--- a/documentation20/cn/01.evaluation/docs.md
+++ b/documentation20/cn/01.evaluation/docs.md
@@ -21,7 +21,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发
## TDengine 总体适用场景
-作为一个 IOT 大数据平台,TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。
+作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。
### 数据源特点和需求
@@ -54,7 +54,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发
|系统性能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
-|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
+|要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
### 系统维护需求
diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md
index 32ac8fe7a3..d262589a6f 100644
--- a/documentation20/cn/02.getting-started/01.docker/docs.md
+++ b/documentation20/cn/02.getting-started/01.docker/docs.md
@@ -1,6 +1,6 @@
# 通过 Docker 快速体验 TDengine
-虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。
+虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从2.0.14.0版本开始,TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台,像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
@@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c
```bash
$ docker -v
-Docker version 20.10.5, build 55c4c88
+Docker version 20.10.3, build 48d30b5
```
## 在 Docker 容器中运行 TDengine
@@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88
1,使用命令拉取 TDengine 镜像,并使它在后台运行。
```bash
-$ docker run -d tdengine/tdengine
-cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
+$ docker run -d --name tdengine tdengine/tdengine
+7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
```
-- **docker run**:通过 Docker 运行一个容器。
-- **-d**:让容器在后台运行。
-- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
-- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。
+- **docker run**:通过 Docker 运行一个容器
+- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器
+- **-d**:让容器在后台运行
+- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像
+- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器
2,确认容器是否已经正确运行。
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
-cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
+c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**:列出所有正在运行状态的容器信息。
@@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
3,进入 Docker 容器内,使用 TDengine。
```bash
-$ docker exec -it cdf548465318 /bin/bash
-root@cdf548465318:~/TDengine-server-2.0.13.0#
+$ docker exec -it tdengine /bin/bash
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
- **-i**:进入交互模式。
- **-t**:指定一个终端。
-- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
+- **c452519b0f9b**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
- **/bin/bash**:载入容器后运行 bash 来进行交互。
4,进入容器后,执行 taos shell 客户端程序。
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
-Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-taos>
+taos>
```
TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
@@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
```bash
$ taos> q
-root@cdf548465318:~/TDengine-server-2.0.13.0#
+root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
2,在命令行界面执行 taosdemo。
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
-###################################################################
-# Server IP: localhost:0
-# User: root
-# Password: taosdata
-# Use metric: true
-# Datatype of Columns: int int int int int int int float
-# Binary Length(If applicable): -1
-# Number of Columns per record: 3
-# Number of Threads: 10
-# Number of Tables: 10000
-# Number of Data per Table: 100000
-# Records/Request: 1000
-# Database name: test
-# Table prefix: t
-# Delete method: 0
-# Test time: 2021-04-13 02:05:20
-###################################################################
+root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
+
+taosdemo is simulating data generated by power equipments monitoring...
+
+host: 127.0.0.1:6030
+user: root
+password: taosdata
+configDir:
+resultFile: ./output.txt
+thread num of insert data: 10
+thread num of create table: 10
+top insert interval: 0
+number of records per req: 30000
+max sql length: 1048576
+database count: 1
+database[0]:
+ database[0] name: test
+ drop: yes
+ replica: 1
+ precision: ms
+ super table count: 1
+ super table[0]:
+ stbName: meters
+ autoCreateTable: no
+ childTblExists: no
+ childTblCount: 10000
+ childTblPrefix: d
+ dataSource: rand
+ iface: taosc
+ insertRows: 10000
+ interlaceRows: 0
+ disorderRange: 1000
+ disorderRatio: 0
+ maxSqlLen: 1048576
+ timeStampStep: 1
+ startTimestamp: 2017-07-14 10:40:00.000
+ sampleFormat:
+ sampleFile:
+ tagsFile:
+ columnCount: 3
+column[0]:FLOAT column[1]:INT column[2]:FLOAT
+ tagCount: 2
+ tag[0]:INT tag[1]:BINARY(16)
+
+ Press enter key to continue or Ctrl-C to stop
```
-回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。
+回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+
+执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
3,进入 TDengine 终端,查看 taosdemo 生成的数据。
- **进入命令行。**
```bash
-$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
+$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
-Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
+Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
-taos>
+taos>
```
- **查看数据库。**
@@ -124,8 +154,8 @@ taos>
```bash
$ taos> show databases;
name | created_time | ntables | vgroups | ···
- test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
- log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
+ test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
+ log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
```
@@ -136,10 +166,10 @@ $ taos> use test;
Database changed.
$ taos> show stables;
- name | created_time | columns | tags | tables |
-=====================================================================================
- meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
-Query OK, 1 row(s) in set (0.001737s)
+ name | created_time | columns | tags | tables |
+============================================================================================
+ meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
+Query OK, 1 row(s) in set (0.003259s)
```
@@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s)
```bash
$ taos> select * from test.t0 limit 10;
- ts | f1 | f2 | f3 |
-====================================================================
- 2017-07-14 02:40:01.000 | 3 | 9 | 0 |
- 2017-07-14 02:40:02.000 | 0 | 1 | 2 |
- 2017-07-14 02:40:03.000 | 7 | 2 | 3 |
- 2017-07-14 02:40:04.000 | 9 | 4 | 5 |
- 2017-07-14 02:40:05.000 | 1 | 2 | 5 |
- 2017-07-14 02:40:06.000 | 6 | 3 | 2 |
- 2017-07-14 02:40:07.000 | 4 | 7 | 8 |
- 2017-07-14 02:40:08.000 | 4 | 6 | 6 |
- 2017-07-14 02:40:09.000 | 5 | 7 | 7 |
- 2017-07-14 02:40:10.000 | 1 | 5 | 0 |
-Query OK, 10 row(s) in set (0.003638s)
+
+DB error: Table does not exist (0.002857s)
+taos> select * from test.d0 limit 10;
+ ts | current | voltage | phase |
+======================================================================================
+ 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
+ 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
+ 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
+ 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
+ 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
+ 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
+ 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
+ 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
+Query OK, 10 row(s) in set (0.016791s)
```
-- **查看 t0 表的标签值。**
+- **查看 d0 表的标签值。**
```bash
-$ taos> select areaid, loc from test.t0;
- areaid | loc |
-===========================
- 10 | shanghai |
-Query OK, 1 row(s) in set (0.002904s)
+$ taos> select groupid, location from test.d0;
+ groupid | location |
+=================================
+ 0 | shanghai |
+Query OK, 1 row(s) in set (0.003490s)
```
## 停止正在 Docker 中运行的 TDengine 服务
```bash
-$ docker stop cdf548465318
-cdf548465318
+$ docker stop tdengine
+tdengine
```
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
-- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。
+- **tdengine**:容器名称。
## 编程开发时连接在 Docker 中的 TDengine
@@ -195,7 +228,7 @@ $ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
-{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
@@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
```bash
-$ docker exec -it 526aa188da /bin/bash
+$ docker exec -it tdengine /bin/bash
```
-
diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md
index ed1d2f7168..45a4537d9b 100644
--- a/documentation20/cn/04.model/docs.md
+++ b/documentation20/cn/04.model/docs.md
@@ -2,7 +2,7 @@
# TDengine数据建模
-TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
+TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 641ef05a2e..fdb07ae179 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -2,8 +2,6 @@
## 总体介绍
-TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。
-
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。

@@ -14,12 +12,10 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实
* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。
* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。
-TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
+TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持嵌套查询(nested query)。
-* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
### JDBC-JNI和JDBC-RESTful的对比
@@ -50,9 +46,12 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
-注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
+注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。例如:
+```sql
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+```
-### TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
@@ -65,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
-### TDengine DataType 和 Java DataType
+## TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
@@ -82,36 +81,27 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| BINARY | byte array |
| NCHAR | java.lang.String |
-## 安装
+## 安装Java Connector
-Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。
-
-**安装前准备:**
-
-- 已安装TDengine服务器端
-- 已安装好TDengine应用驱动,具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节
-
-TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。
-
-由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
+### 安装前准备
+使用Java Connector连接数据库前,需要具备以下条件:
+1. Linux或Windows操作系统
+2. Java 1.8以上运行时环境
+3. TDengine-client(使用JDBC-JNI时必须,使用JDBC-RESTful时非必须)
+**注意**:由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
-
-### 如何获取 TAOS-JDBCDriver
-
-**maven仓库**
+**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
+### 通过maven获取JDBC driver
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
-
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
-maven 项目中使用如下 pom.xml 配置即可:
+maven 项目中,在pom.xml 中添加以下依赖:
```xml-dtd
com.taosdata.jdbc
@@ -119,39 +109,22 @@ maven 项目中使用如下 pom.xml 配置即可:
2.0.18
```
-**源码编译打包**
-下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
+### 通过源码编译获取JDBC driver
-### 示例程序
-
-示例程序源码位于install_directory/examples/JDBC,有如下目录:
-
-JDBCDemo JDBC示例源程序
-
-JDBCConnectorChecker JDBC安装校验源程序及jar包
-
-Springbootdemo springboot示例源程序
-
-SpringJdbcTemplate SpringJDBC模板
-
-### 安装验证
-
-运行如下指令:
-
-```Bash
-cd {install_directory}/examples/JDBC/JDBCConnectorChecker
-java -jar JDBCConnectorChecker.jar -host
+可以通过下载TDengine的源码,自己编译最新版本的java connector
+```shell
+git clone https://github.com/taosdata/TDengine.git
+cd TDengine/src/connector/jdbc
+mvn clean package -Dmaven.test.skip=true
```
-
-验证通过将打印出成功信息。
+编译后,在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。
## Java连接器的使用
### 获取连接
#### 指定URL获取连接
-
通过指定URL获取连接,如下所示:
```java
@@ -159,23 +132,19 @@ Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
-
以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
-
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示:
-
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
-
以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。
@@ -194,6 +163,9 @@ url中的配置参数如下:
* charset:客户端使用的字符集,默认值为系统字符集。
* locale:客户端语言环境,默认值系统当前 locale。
* timezone:客户端使用的时区,默认值为系统当前时区。
+* batchfetch: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
+* timestampFormat: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
+* batchErrorIgnore:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sql。false:不再执行失败sql后的任何语句。默认值为:false。
#### 指定URL和Properties获取连接
@@ -222,11 +194,13 @@ properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。
+* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
+* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
+* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sql。false:不再执行失败sql后的任何语句。默认值为:false。
#### 使用客户端配置文件建立连接
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示:
-
1. 在 Java 应用中不指定 hostname 和 port
```java
@@ -243,7 +217,6 @@ public Connection getConn() throws Exception{
```
2. 在配置文件中指定 firstEp 和 secondEp
-
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@@ -424,9 +397,9 @@ public void setNString(int columnIndex, ArrayList list, int size) throws
```
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
-### 订阅
+## 订阅
-#### 创建
+### 创建
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
@@ -440,7 +413,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
-#### 消费数据
+### 消费数据
```java
int total = 0;
@@ -458,7 +431,7 @@ while(true) {
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
-#### 关闭订阅
+### 关闭订阅
```java
sub.close(true);
@@ -466,7 +439,7 @@ sub.close(true);
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
-### 关闭资源
+## 关闭资源
```java
resultSet.close();
@@ -478,19 +451,8 @@ conn.close();
## 与连接池使用
-**HikariCP**
-
-* 引入相应 HikariCP maven 依赖:
-
-```xml
-
- com.zaxxer
- HikariCP
- 3.4.1
-
-```
-
-* 使用示例如下:
+### HikariCP
+使用示例如下:
```java
public static void main(String[] args) throws SQLException {
@@ -522,19 +484,8 @@ conn.close();
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。
-**Druid**
-
-* 引入相应 Druid maven 依赖:
-
-```xml
-
- com.alibaba
- druid
- 1.1.20
-
-```
-
-* 使用示例如下:
+### Druid
+使用示例如下:
```java
public static void main(String[] args) throws Exception {
@@ -580,6 +531,16 @@ Query OK, 1 row(s) in set (0.000141s)
* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
+## 示例程序
+
+示例程序源码位于TDengine/tests/examples/JDBC下:
+* JDBCDemo:JDBC示例源程序
+* JDBCConnectorChecker:JDBC安装校验源程序及jar包
+* Springbootdemo:springboot示例源程序
+* SpringJdbcTemplate:SpringJDBC模板
+
+请参考:
+
## 常见问题
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 5b695b845a..0ac5a91b50 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -315,10 +315,6 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
1. 调用 `taos_stmt_init` 创建参数绑定对象;
2. 调用 `taos_stmt_prepare` 解析 INSERT 语句;
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名;
- * 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下:
- 1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta;
- 2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名;
- 3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值;
5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值;
6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理;
@@ -362,12 +358,6 @@ typedef struct TAOS_BIND {
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
-- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)`
-
- (2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名)
- 当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。
- *注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta,然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`。
-
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
@@ -976,13 +966,17 @@ Go连接器支持的系统有:
**提示:建议Go版本是1.13及以上,并开启模块支持:**
```sh
- go env -w GO111MODULE=on
- go env -w GOPROXY=https://goproxy.io,direct
+go env -w GO111MODULE=on
+go env -w GOPROXY=https://goproxy.io,direct
```
在taosdemo.go所在目录下进行编译和执行:
```sh
- go mod init *demo*
- go build ./demo -h fqdn -p serverPort
+go mod init taosdemo
+go get github.com/taosdata/driver-go/taosSql
+# use win branch in Windows platform.
+#go get github.com/taosdata/driver-go/taosSql@win
+go build
+./taosdemo -h fqdn -p serverPort
```
### Go连接器的使用
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index f9061200f9..29e49aa902 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -375,7 +375,7 @@ taos -C 或 taos --dump-config
timezone GMT-8
timezone Asia/Shanghai
```
- 均是合法的设置东八区时区的格式。
+ 均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如:
```sql
@@ -800,7 +800,7 @@ taos -n sync -P 6042 -h
`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-从 2.1.7.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
+从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
-n:设为“speed”时,表示对网络速度进行诊断。
-h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
@@ -809,6 +809,15 @@ taos -n sync -P 6042 -h
-l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024*1024*1024,默认值为 1000。
-S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。
+#### FQDN 解析速度诊断
+
+`taos -n fqdn -h `
+
+从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
+
+-n:设为“fqdn”时,表示对 FQDN 解析进行诊断。
+-h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
+
#### 服务端日志
taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 16b52f5773..b183b6e419 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
显示当前数据库下的所有数据表信息。
- 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
-
- 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。
-
- **显示一个数据表的创建语句**
```mysql
@@ -718,15 +714,19 @@ Query OK, 1 row(s) in set (0.001091s)
| = | equal to | all types |
| <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
-| in | matches any value in a set | all types except first column `timestamp` |
+| in | match any value in a set | all types except first column `timestamp` |
+| like | match a wildcard string | **`binary`** **`nchar`** |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
-2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
-3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
-4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+2. like 算子使用通配符字符串进行匹配检查。
+ * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。
+ * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
+3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
+4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
+5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
### UNION ALL 操作符
@@ -1197,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
- 说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
-
限制:LAST_ROW()不能与INTERVAL一起使用。
示例:
diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md
index de555b0a9c..e9eb88242f 100644
--- a/documentation20/en/08.connector/01.java/docs.md
+++ b/documentation20/en/08.connector/01.java/docs.md
@@ -203,7 +203,7 @@ The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system.
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase.
-* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
+* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false.
#### Establishing a connection with configuration file
@@ -317,14 +317,17 @@ Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly impr
Statement stmt = conn.createStatement();
Random r = new Random();
+// In the INSERT statement, the VALUES clause allows you to specify a specific column; If automatic table creation is adopted, the TAGS clause needs to set the parameter values of all TAGS columns
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
s.setTableName("w1");
+// set tags
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
+// set values
ArrayList ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
@@ -341,9 +344,10 @@ for (int i = 0; i < numOfRows; i++){
}
s.setString(2, s2, 10);
+// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch
s.columnDataAddBatch();
s.columnDataExecuteBatch();
-
+// Clear the cache, after which you can bind new data (including table names, tags, values):
s.columnDataClearBatch();
s.columnDataCloseBatch();
```
@@ -499,6 +503,10 @@ Query OK, 1 row(s) in set (0.000141s)
- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate.
- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate.
+## Sample Code
+You can find more sample code here: 
+
+
## FAQ
- java.lang.UnsatisfiedLinkError: no taos in java.library.path
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 7851587c82..55ca1174c9 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -142,6 +142,7 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
+ ${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/set_core || :
fi
@@ -167,6 +168,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
fi
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index c04fa3298b..859d40cf69 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core18
-version: '2.1.6.0'
+version: '2.1.7.1'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.1.6.0
+ - usr/lib/libtaos.so.2.1.7.1
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c
index 7d94df1c23..04a14357c9 100644
--- a/src/balance/src/bnScore.c
+++ b/src/balance/src/bnScore.c
@@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
- tsBnDnodes.maxSize = dnodesNum * 2;
- tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
+ int32_t maxSize = dnodesNum * 2;
+ SDnodeObj** list1 = NULL;
+ int32_t retry = 0;
+
+ while(list1 == NULL && retry++ < 3) {
+ list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
+ }
+ if(list1) {
+ tsBnDnodes.list = list1;
+ tsBnDnodes.maxSize = maxSize;
+ }
}
}
diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt
index 2f83557d63..0d06e5d39c 100644
--- a/src/client/CMakeLists.txt
+++ b/src/client/CMakeLists.txt
@@ -4,6 +4,8 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(jni)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
+INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h
index f0349c2b3d..a012ca5a7f 100644
--- a/src/client/inc/tscSubquery.h
+++ b/src/client/inc/tscSubquery.h
@@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy);
int tsInsertInitialCheck(SSqlObj *pSql);
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
+
+void tscFreeRetrieveSup(SSqlObj *pSql);
+
+
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 4b3b8899d9..64b5f8d7ee 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -144,6 +144,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 4847a34dd1..b8eb0a5286 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -38,6 +38,11 @@ extern "C" {
#include "qUtil.h"
#include "tcmdtype.h"
+typedef enum {
+ TAOS_REQ_FROM_SHELL,
+ TAOS_REQ_FROM_HTTP
+} SReqOrigin;
+
// forward declaration
struct SSqlInfo;
@@ -123,7 +128,7 @@ typedef struct {
int32_t kvLen; // len of SKVRow
} SMemRowInfo;
typedef struct {
- uint8_t memRowType; // default is 0, that is SDataRow
+ uint8_t memRowType; // default is 0, that is SDataRow
uint8_t compareStat; // 0 no need, 1 need compare
TDRowTLenT kvRowInitLen;
SMemRowInfo *rowInfo;
@@ -340,6 +345,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj
+ SReqOrigin from;
} STscObj;
typedef struct SSubqueryState {
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 35b37b7ae6..b3d6ade319 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -1693,7 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
- SInsertStatementParam* pInsertParam = &pCmd->insertParam;
+ SInsertStatementParam *pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam);
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c
index 48393a3dda..e1f036fea6 100644
--- a/src/client/src/tscPrepare.c
+++ b/src/client/src/tscPrepare.c
@@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self;
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
-
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index f80d3e7fd1..fcc82a5e81 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -40,6 +40,7 @@
#include "qScript.h"
#include "ttype.h"
#include "qFilter.h"
+#include "httpInt.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@@ -1671,8 +1672,28 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
static char* cloneCurrentDBName(SSqlObj* pSql) {
+ char *p = NULL;
+ HttpContext *pCtx = NULL;
+
pthread_mutex_lock(&pSql->pTscObj->mutex);
- char *p = strdup(pSql->pTscObj->db);
+ STscObj *pTscObj = pSql->pTscObj;
+ switch (pTscObj->from) {
+ case TAOS_REQ_FROM_HTTP:
+ pCtx = pSql->param;
+ if (pCtx && pCtx->db[0] != '\0') {
+ char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
+ int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
+ assert(len <= sizeof(db));
+
+ p = strdup(db);
+ }
+ break;
+ default:
+ break;
+ }
+ if (p == NULL) {
+ p = strdup(pSql->pTscObj->db);
+ }
pthread_mutex_unlock(&pSql->pTscObj->mutex);
return p;
@@ -2033,9 +2054,10 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
+
bool hasDistinct = false;
bool hasAgg = false;
- size_t numOfExpr = taosArrayGetSize(pSelNodeList);
+ size_t numOfExpr = taosArrayGetSize(pSelNodeList);
int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
@@ -2090,7 +2112,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
}
-
//TODO(dengyihao), refactor as function
//handle distinct func mixed with other func
if (hasDistinct == true) {
@@ -2106,6 +2127,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
if (pQueryInfo->pDownstream != NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
+
pQueryInfo->distinct = true;
}
@@ -2629,7 +2651,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
- }
+ }
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
@@ -2663,8 +2685,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
-
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
+
return TSDB_CODE_SUCCESS;
}
@@ -3046,7 +3068,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
}
}
-
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
return TSDB_CODE_SUCCESS;
}
@@ -4644,7 +4665,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
- ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
+ ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL;
if (type) {
*type |= TSQL_EXPR_JOIN;
@@ -5626,6 +5647,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg3 = "top/bottom not support fill";
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
+ const char* msg6 = "not supported function now";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
@@ -5664,6 +5686,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
}
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV;
+ if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
+ }
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@@ -5768,14 +5793,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
const char* msg8 = "only column in groupby clause allowed as order column";
const char* msg9 = "orderby column must projected in subquery";
+ const char* msg10 = "not support distinct mixed with order by";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-
- if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
+ if (pSqlNode->pSortOrder == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
SArray* pSortOrder = pSqlNode->pSortOrder;
@@ -5795,6 +5819,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg2);
}
}
+ if (size > 0 && pQueryInfo->distinct) {
+ return invalidOperationMsg(pMsgBuf, msg10);
+ }
// handle the first part of order by
tVariant* pVar = taosArrayGet(pSortOrder, 0);
@@ -5863,12 +5890,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) {
/* order of top/bottom query in interval is not valid */
+
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, pos);
+
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
@@ -5959,13 +5988,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg8);
}
} else {
- /* order of top/bottom query in interval is not valid */
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, pos);
+
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
@@ -8672,6 +8701,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
n += 1;
}
+ info->numOfColumns = n;
+
return meta;
}
@@ -8700,7 +8731,6 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
return code;
}
-
// create dummy table meta info
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
if (pTableMetaInfo1 == NULL) {
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index 5f55e1c50d..5fdaad0d66 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -892,7 +892,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
- pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
+ if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
+ pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 45bcf4095a..b8901a0288 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2038,17 +2038,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
tscAsyncResultOnError(pSql);
}
-static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
+void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL);
-
- SRetrieveSupport* pSupport = pSub->param;
-
- tfree(pSupport->localBuffer);
- tfree(pSupport);
+
+ tscFreeRetrieveSup(pSub);
taos_free_result(pSub);
}
@@ -2406,6 +2403,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} else {
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
+ int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
+ assert(ti >= 0);
+ SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
+ tscColumnCopy(x, pCol);
}
}
}
@@ -2607,7 +2608,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
-static void tscFreeRetrieveSup(SSqlObj *pSql) {
+void tscFreeRetrieveSup(SSqlObj *pSql) {
SRetrieveSupport *trsupport = pSql->param;
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
@@ -2765,27 +2766,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
int32_t code = pParentSql->res.code;
- if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
- // remove the cached tableMeta and vgroup id list, and then parse the sql again
- tscResetSqlCmd( &pParentSql->cmd, true, pParentSql->self);
+ SSqlObj *userSql = NULL;
+ if (pParentSql->param) {
+ userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql;
+ }
- pParentSql->retry++;
- pParentSql->res.code = TSDB_CODE_SUCCESS;
- tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
- tstrerror(code), pParentSql->retry);
+ if (userSql == NULL) {
+ userSql = pParentSql;
+ }
- code = tsParseSql(pParentSql, true);
+ if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
+ if (userSql != pParentSql) {
+ tscFreeRetrieveSup(pParentSql);
+ }
+
+ tscFreeSubobj(userSql);
+ tfree(userSql->pSubs);
+
+ userSql->res.code = TSDB_CODE_SUCCESS;
+ userSql->retry++;
+
+ tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
+ tstrerror(code), userSql->retry);
+
+ tscResetSqlCmd(&userSql->cmd, true, userSql->self);
+ code = tsParseSql(userSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
}
if (code != TSDB_CODE_SUCCESS) {
- pParentSql->res.code = code;
- tscAsyncResultOnError(pParentSql);
+ userSql->res.code = code;
+ tscAsyncResultOnError(userSql);
return;
}
- executeQuery(pParentSql, pQueryInfo);
+ pQueryInfo = tscGetQueryInfo(&userSql->cmd);
+ executeQuery(userSql, pQueryInfo);
} else {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
}
@@ -2855,7 +2872,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
- tscClearInterpInfo(pPQueryInfo);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 21d8a552cb..192f7ae3d7 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -403,6 +403,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
+int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
+ size_t numOfExprs = tscNumOfExprs(pQueryInfo);
+
+ for (int32_t i = 0; i < numOfExprs; ++i) {
+ SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
+ if (pExpr == NULL) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TS) {
+ continue;
+ }
+
+ if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@@ -659,8 +680,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
- pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
-
+ char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
+ if(buffer == NULL)
+ return ;
+ pRes->buffer[i] = buffer;
// string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
@@ -1236,6 +1259,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
}
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
+
pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL;
@@ -3634,10 +3658,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
- pNewQueryInfo->bufLen = pQueryInfo->bufLen;
- pNewQueryInfo->distinct = pQueryInfo->distinct;
-
+ pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
+
+ pNewQueryInfo->distinct = pQueryInfo->distinct;
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@@ -3853,8 +3877,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
int32_t index = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index);
- tfree(ps);
- pSql->param = NULL;
+ tscFreeRetrieveSup(pSql);
if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
@@ -3863,7 +3886,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
// todo refactor
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
- tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
+ if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
+ tscAsyncResultOnError(pParentSql);
+ return;
+ }
+
+ tscFreeSubobj(pParentSql);
+ tfree(pParentSql->pSubs);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
@@ -3871,6 +3900,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
+
+ tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
+
code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
@@ -3905,9 +3937,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
}
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
+ assert(pSql->subState.numOfSub == 0);
pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
-
+ assert(pSql->pSubs == NULL);
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
+ assert(pSql->subState.states == NULL);
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
code = pthread_mutex_init(&pSql->subState.mutex, NULL);
@@ -3933,6 +3967,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->sqlstr = strdup(pSql->sqlstr);
pNew->fp = tscSubqueryCompleteCallback;
pNew->maxRetry = pSql->maxRetry;
+
+ pNew->cmd.resColumnId = TSDB_RES_COL_ID;
+
tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
@@ -4490,10 +4527,14 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
assert(*ppChild != NULL);
STableMeta* p = *ppSTable;
STableMeta* pChild = *ppChild;
+
size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
if (p != NULL && sz != 0) {
memset((char *)p, 0, sz);
}
+
+ STableMeta* pChild1;
+
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
*ppSTable = p;
@@ -4504,7 +4545,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
- pChild = realloc(pChild, tableMetaSize);
+ pChild1 = realloc(pChild, tableMetaSize);
+ if(pChild1 == NULL)
+ return -1;
+ pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize;
}
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 46259c8488..a01c377539 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
- if (pBuilder->pColIdx == NULL) return -1;
+ SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
+ if (pColIdx == NULL) return -1;
+ pBuilder->pColIdx = pColIdx;
}
pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2;
}
- pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
- if (pBuilder->buf == NULL) return -1;
+ void* buf = realloc(pBuilder->buf, pBuilder->alloc);
+ if (buf == NULL) return -1;
+ pBuilder->buf = buf;
}
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index 4b8347ead0..30ae6faf1c 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -163,6 +163,7 @@ extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
extern int64_t tsTickPerDay[3];
+extern int32_t tsTopicBianryLen;
// system info
extern char tsOsName[];
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index a5aabbe1f6..aa60803dac 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
- pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
- if (pBuilder->columns == NULL) return -1;
+ STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
+ if (columns == NULL) return -1;
+ pBuilder->columns = columns;
}
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index f169b07bb2..2d1c6780d1 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -84,7 +84,7 @@ int32_t tsCompressColData = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
-int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
+int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
@@ -152,7 +152,6 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
// tsdb config
-
// For backward compatibility
bool tsdbForceKeepFile = false;
@@ -210,6 +209,7 @@ char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0;
+int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
SDiskCfg tsDiskCfg[1];
@@ -570,7 +570,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
-
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1238,6 +1237,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "topicBianryLen";
+ cfg.ptr = &tsTopicBianryLen;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 16;
+ cfg.maxValue = 16000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT8;
diff --git a/src/connector/go b/src/connector/go
index b8f76da4a7..050667e5b4 160000
--- a/src/connector/go
+++ b/src/connector/go
@@ -1 +1 @@
-Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
+Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension
index ce52010141..b62a26ecc1 160000
--- a/src/connector/hivemq-tdengine-extension
+++ b/src/connector/hivemq-tdengine-extension
@@ -1 +1 @@
-Subproject commit ce5201014136503d34fecbd56494b67b4961056c
+Subproject commit b62a26ecc164a310104df57691691b237e091c89
diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py
index 51e9a8667d..42dac3c2e8 100644
--- a/src/connector/python/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -49,7 +49,7 @@ def _load_taos():
try:
return load_func[platform.system()]()
except:
- sys.exit("unsupported platform to TDengine connector")
+ raise InterfaceError('unsupported platform or failed to load taos client library')
_libtaos = _load_taos()
diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c
index d78f1a6b99..7fc8b1409a 100644
--- a/src/kit/shell/src/shellCheck.c
+++ b/src/kit/shell/src/shellCheck.c
@@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
int32_t tbIndex = tbNum++;
if (tbMallocNum < tbNum) {
tbMallocNum = (tbMallocNum * 2 + 1);
- tbNames = realloc(tbNames, tbMallocNum * sizeof(char *));
- if (tbNames == NULL) {
+ char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
+ if (tbNames1 == NULL) {
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
break;
}
+ tbNames = tbNames1;
}
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);
diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c
index bf19394d05..efc37403b4 100644
--- a/src/kit/shell/src/shellEngine.c
+++ b/src/kit/shell/src/shellEngine.c
@@ -254,8 +254,12 @@ int32_t shellRunCommand(TAOS* con, char* command) {
}
if (c == '\\') {
- esc = true;
- continue;
+ if (quote != 0 && (*command == '_' || *command == '\\')) {
+ //DO nothing
+ } else {
+ esc = true;
+ continue;
+ }
}
if (quote == c) {
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index f222266ee8..e0cc76d5a8 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -443,6 +443,7 @@ typedef struct SThreadInfo_S {
uint64_t start_table_from;
uint64_t end_table_to;
int64_t ntables;
+ int64_t tables_created;
uint64_t data_of_rate;
int64_t start_time;
char* cols;
@@ -639,6 +640,7 @@ SArguments g_args = {
static SDbs g_Dbs;
static int64_t g_totalChildTables = 0;
+static int64_t g_actualChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
@@ -659,6 +661,13 @@ static FILE * g_fpOfInsertResult = NULL;
fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
#define errorPrint(fmt, ...) \
+ do {\
+ fprintf(stderr, " \033[31m");\
+ fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\
+ fprintf(stderr, " \033[0m");\
+ } while(0)
+
+#define errorPrint2(fmt, ...) \
do {\
struct tm Tm, *ptm;\
struct timeval timeSecs; \
@@ -671,8 +680,8 @@ static FILE * g_fpOfInsertResult = NULL;
ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\
taosGetSelfPthreadId());\
- fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\
fprintf(stderr, " \033[0m");\
+ errorPrint(fmt, __VA_ARGS__);\
} while(0)
// for strncpy buffer overflow
@@ -815,6 +824,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-f") == 0) {
arguments->demo_mode = false;
+
+ if (NULL == argv[i+1]) {
+ printHelp();
+ errorPrint("%s", "\n\t-f need a valid json file following!\n");
+ exit(EXIT_FAILURE);
+ }
arguments->metaFile = argv[++i];
} else if (strcmp(argv[i], "-c") == 0) {
if (argc == i+1) {
@@ -951,6 +966,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
arguments->num_of_tables = atoi(argv[++i]);
+ g_totalChildTables = arguments->num_of_tables;
} else if (strcmp(argv[i], "-n") == 0) {
if ((argc == i+1) ||
(!isStringNumber(argv[i+1]))) {
@@ -1121,7 +1137,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
- (strcmp(argv[i], "-V") == 0)){
+ (strcmp(argv[i], "-V") == 0)) {
printVersion();
exit(0);
} else if (strcmp(argv[i], "--help") == 0) {
@@ -1227,7 +1243,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
if (code != 0) {
if (!quiet) {
- errorPrint("Failed to execute %s, reason: %s\n",
+ errorPrint2("Failed to execute %s, reason: %s\n",
command, taos_errstr(res));
}
taos_free_result(res);
@@ -1249,7 +1265,7 @@ static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{
pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
if (pThreadInfo->fp == NULL) {
- errorPrint(
+ errorPrint2(
"%s() LN%d, failed to open result file: %s, result will not save to file\n",
__func__, __LINE__, pThreadInfo->filePath);
return;
@@ -1268,7 +1284,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
char* databuf = (char*) calloc(1, 100*1024*1024);
if (databuf == NULL) {
- errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
+ errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n",
__func__, __LINE__);
return ;
}
@@ -1308,7 +1324,7 @@ static void selectAndGetResult(
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) {
TAOS_RES *res = taos_query(pThreadInfo->taos, command);
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n",
__func__, __LINE__, command, taos_errstr(res));
taos_free_result(res);
return;
@@ -1327,19 +1343,19 @@ static void selectAndGetResult(
}
} else {
- errorPrint("%s() LN%d, unknown query mode: %s\n",
+ errorPrint2("%s() LN%d, unknown query mode: %s\n",
__func__, __LINE__, g_queryInfo.queryMode);
}
}
-static char *rand_bool_str(){
+static char *rand_bool_str() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randbool_buff + (cursor * BOOL_BUFF_LEN);
+ return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN);
}
-static int32_t rand_bool(){
+static int32_t rand_bool() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
@@ -1351,7 +1367,8 @@ static char *rand_tinyint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randtinyint_buff + (cursor * TINYINT_BUFF_LEN);
+ return g_randtinyint_buff +
+ ((cursor % MAX_PREPARED_RAND) * TINYINT_BUFF_LEN);
}
static int32_t rand_tinyint()
@@ -1367,7 +1384,8 @@ static char *rand_smallint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randsmallint_buff + (cursor * SMALLINT_BUFF_LEN);
+ return g_randsmallint_buff +
+ ((cursor % MAX_PREPARED_RAND) * SMALLINT_BUFF_LEN);
}
static int32_t rand_smallint()
@@ -1383,7 +1401,7 @@ static char *rand_int_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randint_buff + (cursor * INT_BUFF_LEN);
+ return g_randint_buff + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN);
}
static int32_t rand_int()
@@ -1399,7 +1417,8 @@ static char *rand_bigint_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randbigint_buff + (cursor * BIGINT_BUFF_LEN);
+ return g_randbigint_buff +
+ ((cursor % MAX_PREPARED_RAND) * BIGINT_BUFF_LEN);
}
static int64_t rand_bigint()
@@ -1415,7 +1434,7 @@ static char *rand_float_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_randfloat_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
@@ -1432,7 +1451,8 @@ static char *demo_current_float_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_current_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_rand_current_buff +
+ ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
static float UNUSED_FUNC demo_current_float()
@@ -1449,7 +1469,8 @@ static char *demo_voltage_int_str()
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_voltage_buff + (cursor * INT_BUFF_LEN);
+ return g_rand_voltage_buff +
+ ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN);
}
static int32_t UNUSED_FUNC demo_voltage_int()
@@ -1464,10 +1485,10 @@ static char *demo_phase_float_str() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
- return g_rand_phase_buff + (cursor * FLOAT_BUFF_LEN);
+ return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN);
}
-static float UNUSED_FUNC demo_phase_float(){
+static float UNUSED_FUNC demo_phase_float() {
static int cursor;
cursor++;
if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
@@ -1546,7 +1567,7 @@ static void init_rand_data() {
g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND);
assert(g_randdouble_buff);
- for (int i = 0; i < MAX_PREPARED_RAND; i++){
+ for (int i = 0; i < MAX_PREPARED_RAND; i++) {
g_randint[i] = (int)(taosRandom() % 65535);
sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d",
g_randint[i]);
@@ -2141,7 +2162,7 @@ static void xDumpFieldToFile(FILE* fp, const char* val,
fprintf(fp, "%d", *((int32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
- fprintf(fp, "%" PRId64, *((int64_t *)val));
+ fprintf(fp, "%"PRId64"", *((int64_t *)val));
break;
case TSDB_DATA_TYPE_FLOAT:
fprintf(fp, "%.5f", GET_FLOAT_VAL(val));
@@ -2172,7 +2193,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) {
FILE* fp = fopen(fname, "at");
if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file: %s\n",
+ errorPrint2("%s() LN%d, failed to open file: %s\n",
__func__, __LINE__, fname);
return -1;
}
@@ -2219,7 +2240,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
int32_t code = taos_errno(res);
if (code != 0) {
- errorPrint( "failed to run , reason: %s\n",
+ errorPrint2("failed to run , reason: %s\n",
taos_errstr(res));
return -1;
}
@@ -2235,7 +2256,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) {
dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
if (dbInfos[count] == NULL) {
- errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count);
+ errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count);
return -1;
}
@@ -2388,7 +2409,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
request_buf = malloc(req_buf_len);
if (NULL == request_buf) {
- errorPrint("%s", "ERROR, cannot allocate memory.\n");
+ errorPrint("%s", "cannot allocate memory.\n");
exit(EXIT_FAILURE);
}
@@ -2527,7 +2548,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) {
char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1);
if (NULL == dataBuf) {
- errorPrint("%s() LN%d, calloc failed! size:%d\n",
+ errorPrint2("%s() LN%d, calloc failed! size:%d\n",
__func__, __LINE__, TSDB_MAX_SQL_LEN+1);
return NULL;
}
@@ -2627,7 +2648,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) {
dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen,
"%"PRId64",", rand_bigint());
} else {
- errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType);
+ errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType);
tmfree(dataBuf);
return NULL;
}
@@ -2666,7 +2687,7 @@ static int calcRowLen(SSuperTable* superTbls) {
} else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
lenOfOneRow += TIMESTAMP_BUFF_LEN;
} else {
- errorPrint("get error data type : %s\n", dataType);
+ errorPrint2("get error data type : %s\n", dataType);
exit(EXIT_FAILURE);
}
}
@@ -2697,7 +2718,7 @@ static int calcRowLen(SSuperTable* superTbls) {
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN;
} else {
- errorPrint("get error tag type : %s\n", dataType);
+ errorPrint2("get error tag type : %s\n", dataType);
exit(EXIT_FAILURE);
}
}
@@ -2734,7 +2755,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
if (code != 0) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to run command %s\n",
+ errorPrint2("%s() LN%d, failed to run command %s\n",
__func__, __LINE__, command);
exit(EXIT_FAILURE);
}
@@ -2746,7 +2767,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
if (NULL == childTblName) {
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
}
@@ -2756,7 +2777,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
int32_t* len = taos_fetch_lengths(res);
if (0 == strlen((char *)row[0])) {
- errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+ errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n",
__func__, __LINE__, count);
exit(EXIT_FAILURE);
}
@@ -2777,7 +2798,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
tmfree(childTblName);
taos_free_result(res);
taos_close(taos);
- errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+ errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n",
__func__, __LINE__, dbName, sTblName);
exit(EXIT_FAILURE);
}
@@ -2874,7 +2895,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
int childTblCount = 10000;
superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (superTbls->childTblName == NULL) {
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
return -1;
}
getAllChildNameOfSuperTable(taos, dbName,
@@ -2900,7 +2921,7 @@ static int createSuperTable(
int lenOfOneRow = 0;
if (superTbl->columnCount == 0) {
- errorPrint("%s() LN%d, super table column count is %d\n",
+ errorPrint2("%s() LN%d, super table column count is %d\n",
__func__, __LINE__, superTbl->columnCount);
free(command);
return -1;
@@ -2964,7 +2985,7 @@ static int createSuperTable(
} else {
taos_close(taos);
free(command);
- errorPrint("%s() LN%d, config error data type : %s\n",
+ errorPrint2("%s() LN%d, config error data type : %s\n",
__func__, __LINE__, dataType);
exit(EXIT_FAILURE);
}
@@ -2977,7 +2998,7 @@ static int createSuperTable(
if (NULL == superTbl->colsOfCreateChildTable) {
taos_close(taos);
free(command);
- errorPrint("%s() LN%d, Failed when calloc, size:%d",
+ errorPrint2("%s() LN%d, Failed when calloc, size:%d",
__func__, __LINE__, len+1);
exit(EXIT_FAILURE);
}
@@ -2987,7 +3008,7 @@ static int createSuperTable(
__func__, __LINE__, superTbl->colsOfCreateChildTable);
if (superTbl->tagCount == 0) {
- errorPrint("%s() LN%d, super table tag count is %d\n",
+ errorPrint2("%s() LN%d, super table tag count is %d\n",
__func__, __LINE__, superTbl->tagCount);
free(command);
return -1;
@@ -3054,7 +3075,7 @@ static int createSuperTable(
} else {
taos_close(taos);
free(command);
- errorPrint("%s() LN%d, config error tag type : %s\n",
+ errorPrint2("%s() LN%d, config error tag type : %s\n",
__func__, __LINE__, dataType);
exit(EXIT_FAILURE);
}
@@ -3069,7 +3090,7 @@ static int createSuperTable(
"create table if not exists %s.%s (ts timestamp%s) tags %s",
dbName, superTbl->sTblName, cols, tags);
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
- errorPrint( "create supertable %s failed!\n\n",
+ errorPrint2("create supertable %s failed!\n\n",
superTbl->sTblName);
free(command);
return -1;
@@ -3085,7 +3106,7 @@ int createDatabasesAndStables(char *command) {
int ret = 0;
taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+ errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
return -1;
}
@@ -3181,7 +3202,7 @@ int createDatabasesAndStables(char *command) {
if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) {
taos_close(taos);
- errorPrint( "\ncreate database %s failed!\n\n",
+ errorPrint("\ncreate database %s failed!\n\n",
g_Dbs.db[i].dbName);
return -1;
}
@@ -3211,7 +3232,7 @@ int createDatabasesAndStables(char *command) {
ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
&g_Dbs.db[i].superTbls[j]);
if (0 != ret) {
- errorPrint("\nget super table %s.%s info failed!\n\n",
+ errorPrint2("\nget super table %s.%s info failed!\n\n",
g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName);
continue;
}
@@ -3239,7 +3260,7 @@ static void* createTable(void *sarg)
pThreadInfo->buffer = calloc(buff_len, 1);
if (pThreadInfo->buffer == NULL) {
- errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -3258,10 +3279,11 @@ static void* createTable(void *sarg)
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
+ batchNum ++;
} else {
if (stbInfo == NULL) {
free(pThreadInfo->buffer);
- errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
+ errorPrint2("%s() LN%d, use metric, but super table info is NULL\n",
__func__, __LINE__);
exit(EXIT_FAILURE);
} else {
@@ -3307,13 +3329,14 @@ static void* createTable(void *sarg)
len = 0;
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
- NO_INSERT_TYPE, false)){
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ NO_INSERT_TYPE, false)) {
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
free(pThreadInfo->buffer);
return NULL;
}
+ pThreadInfo->tables_created += batchNum;
- uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
@@ -3324,7 +3347,7 @@ static void* createTable(void *sarg)
if (0 != len) {
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
NO_INSERT_TYPE, false)) {
- errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
+ errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
}
}
@@ -3369,7 +3392,7 @@ static int startMultiThreadCreateChildTable(
db_name,
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
- errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
__func__, __LINE__, taos_errstr(NULL));
free(pids);
free(infos);
@@ -3383,6 +3406,7 @@ static int startMultiThreadCreateChildTable(
pThreadInfo->use_metric = true;
pThreadInfo->cols = cols;
pThreadInfo->minDelay = UINT64_MAX;
+ pThreadInfo->tables_created = 0;
pthread_create(pids + i, NULL, createTable, pThreadInfo);
}
@@ -3393,6 +3417,8 @@ static int startMultiThreadCreateChildTable(
for (int i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
taos_close(pThreadInfo->taos);
+
+ g_actualChildTables += pThreadInfo->tables_created;
}
free(pids);
@@ -3419,7 +3445,6 @@ static void createChildTables() {
verbosePrint("%s() LN%d: %s\n", __func__, __LINE__,
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable);
uint64_t startFrom = 0;
- g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
__func__, __LINE__, g_totalChildTables, startFrom);
@@ -3544,7 +3569,7 @@ static int readSampleFromCsvFileToMem(
FILE* fp = fopen(stbInfo->sampleFile, "r");
if (fp == NULL) {
- errorPrint( "Failed to open sample file: %s, reason:%s\n",
+ errorPrint("Failed to open sample file: %s, reason:%s\n",
stbInfo->sampleFile, strerror(errno));
return -1;
}
@@ -3556,7 +3581,7 @@ static int readSampleFromCsvFileToMem(
readLen = tgetline(&line, &n, fp);
if (-1 == readLen) {
if(0 != fseek(fp, 0, SEEK_SET)) {
- errorPrint( "Failed to fseek file: %s, reason:%s\n",
+ errorPrint("Failed to fseek file: %s, reason:%s\n",
stbInfo->sampleFile, strerror(errno));
fclose(fp);
return -1;
@@ -3599,7 +3624,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// columns
cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
if (columns && columns->type != cJSON_Array) {
- printf("ERROR: failed to read json, columns not found\n");
+ errorPrint("%s", "failed to read json, columns not found\n");
goto PARSE_OVER;
} else if (NULL == columns) {
superTbls->columnCount = 0;
@@ -3609,8 +3634,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
int columnSize = cJSON_GetArraySize(columns);
if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("failed to read json, column size overflow, max column size is %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
@@ -3628,8 +3653,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column count not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3640,8 +3664,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(column, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d: failed to read json, column type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column type not found\n");
goto PARSE_OVER;
}
//tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN);
@@ -3669,8 +3692,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) {
- errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n",
- __func__, __LINE__, MAX_NUM_COLUMNS);
+ errorPrint("failed to read json, column size overflow, allowed max column size is %d\n",
+ MAX_NUM_COLUMNS);
goto PARSE_OVER;
}
@@ -3681,15 +3704,14 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
// tags
cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags");
if (!tags || tags->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, tags not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tags not found\n");
goto PARSE_OVER;
}
int tagSize = cJSON_GetArraySize(tags);
if (tagSize > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, max tag size is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
@@ -3703,7 +3725,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (countObj && countObj->type == cJSON_Number) {
count = countObj->valueint;
} else if (countObj && countObj->type != cJSON_Number) {
- printf("ERROR: failed to read json, column count not found\n");
+ errorPrint("%s", "failed to read json, column count not found\n");
goto PARSE_OVER;
} else {
count = 1;
@@ -3714,8 +3736,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
cJSON *dataType = cJSON_GetObjectItem(tag, "type");
if (!dataType || dataType->type != cJSON_String
|| dataType->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, tag type not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, tag type not found\n");
goto PARSE_OVER;
}
tstrncpy(columnCase.dataType, dataType->valuestring,
@@ -3725,8 +3746,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
if (dataLen && dataLen->type == cJSON_Number) {
columnCase.dataLen = dataLen->valueint;
} else if (dataLen && dataLen->type != cJSON_Number) {
- errorPrint("%s() LN%d, failed to read json, column len not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, column len not found\n");
goto PARSE_OVER;
} else {
columnCase.dataLen = 0;
@@ -3741,16 +3761,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
}
if (index > TSDB_MAX_TAGS) {
- errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n",
- __func__, __LINE__, TSDB_MAX_TAGS);
+ errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n",
+ TSDB_MAX_TAGS);
goto PARSE_OVER;
}
superTbls->tagCount = index;
if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) {
- errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n",
- __func__, __LINE__, TSDB_MAX_COLUMNS);
+ errorPrint("columns + tags is more than allowed max columns count: %d\n",
+ TSDB_MAX_COLUMNS);
goto PARSE_OVER;
}
ret = true;
@@ -3773,7 +3793,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -3811,7 +3831,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!threads) {
g_Dbs.threadCount = 1;
} else {
- printf("ERROR: failed to read json, threads not found\n");
+ errorPrint("%s", "failed to read json, threads not found\n");
goto PARSE_OVER;
}
@@ -3821,32 +3841,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!threads2) {
g_Dbs.threadCountByCreateTbl = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, threads2 not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads2 not found\n");
goto PARSE_OVER;
}
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
if (gInsertInterval->valueint <0) {
- errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert interval input mistake\n");
goto PARSE_OVER;
}
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlace_rows input mistake\n");
goto PARSE_OVER;
}
@@ -3854,8 +3870,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!interlaceRows) {
g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else {
- errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlace_rows input mistake\n");
goto PARSE_OVER;
}
@@ -3928,14 +3943,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (!dbs || dbs->type != cJSON_Array) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
int dbSize = cJSON_GetArraySize(dbs);
if (dbSize > MAX_DB_COUNT) {
errorPrint(
- "ERROR: failed to read json, databases size overflow, max database is %d\n",
+ "failed to read json, databases size overflow, max database is %d\n",
MAX_DB_COUNT);
goto PARSE_OVER;
}
@@ -3948,13 +3963,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
// dbinfo
cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo");
if (!dbinfo || dbinfo->type != cJSON_Object) {
- printf("ERROR: failed to read json, dbinfo not found\n");
+ errorPrint("%s", "failed to read json, dbinfo not found\n");
goto PARSE_OVER;
}
cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name");
if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) {
- printf("ERROR: failed to read json, db name not found\n");
+ errorPrint("%s", "failed to read json, db name not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
@@ -3969,8 +3984,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!drop) {
g_Dbs.db[i].drop = g_args.drop_database;
} else {
- errorPrint("%s() LN%d, failed to read json, drop input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, drop input mistake\n");
goto PARSE_OVER;
}
@@ -3982,7 +3996,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!precision) {
memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, precision not found\n");
+ errorPrint("%s", "failed to read json, precision not found\n");
goto PARSE_OVER;
}
@@ -3992,7 +4006,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!update) {
g_Dbs.db[i].dbCfg.update = -1;
} else {
- printf("ERROR: failed to read json, update not found\n");
+ errorPrint("%s", "failed to read json, update not found\n");
goto PARSE_OVER;
}
@@ -4002,7 +4016,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!replica) {
g_Dbs.db[i].dbCfg.replica = -1;
} else {
- printf("ERROR: failed to read json, replica not found\n");
+ errorPrint("%s", "failed to read json, replica not found\n");
goto PARSE_OVER;
}
@@ -4012,7 +4026,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!keep) {
g_Dbs.db[i].dbCfg.keep = -1;
} else {
- printf("ERROR: failed to read json, keep not found\n");
+ errorPrint("%s", "failed to read json, keep not found\n");
goto PARSE_OVER;
}
@@ -4022,7 +4036,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!days) {
g_Dbs.db[i].dbCfg.days = -1;
} else {
- printf("ERROR: failed to read json, days not found\n");
+ errorPrint("%s", "failed to read json, days not found\n");
goto PARSE_OVER;
}
@@ -4032,7 +4046,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cache) {
g_Dbs.db[i].dbCfg.cache = -1;
} else {
- printf("ERROR: failed to read json, cache not found\n");
+ errorPrint("%s", "failed to read json, cache not found\n");
goto PARSE_OVER;
}
@@ -4042,7 +4056,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!blocks) {
g_Dbs.db[i].dbCfg.blocks = -1;
} else {
- printf("ERROR: failed to read json, block not found\n");
+ errorPrint("%s", "failed to read json, block not found\n");
goto PARSE_OVER;
}
@@ -4062,7 +4076,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!minRows) {
g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, minRows not found\n");
+ errorPrint("%s", "failed to read json, minRows not found\n");
goto PARSE_OVER;
}
@@ -4072,7 +4086,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxRows) {
g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
- printf("ERROR: failed to read json, maxRows not found\n");
+ errorPrint("%s", "failed to read json, maxRows not found\n");
goto PARSE_OVER;
}
@@ -4082,7 +4096,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!comp) {
g_Dbs.db[i].dbCfg.comp = -1;
} else {
- printf("ERROR: failed to read json, comp not found\n");
+ errorPrint("%s", "failed to read json, comp not found\n");
goto PARSE_OVER;
}
@@ -4092,7 +4106,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!walLevel) {
g_Dbs.db[i].dbCfg.walLevel = -1;
} else {
- printf("ERROR: failed to read json, walLevel not found\n");
+ errorPrint("%s", "failed to read json, walLevel not found\n");
goto PARSE_OVER;
}
@@ -4102,7 +4116,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!cacheLast) {
g_Dbs.db[i].dbCfg.cacheLast = -1;
} else {
- printf("ERROR: failed to read json, cacheLast not found\n");
+ errorPrint("%s", "failed to read json, cacheLast not found\n");
goto PARSE_OVER;
}
@@ -4122,24 +4136,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!fsync) {
g_Dbs.db[i].dbCfg.fsync = -1;
} else {
- errorPrint("%s() LN%d, failed to read json, fsync input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, fsync input mistake\n");
goto PARSE_OVER;
}
// super_talbes
cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
if (!stables || stables->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super_tables not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super_tables not found\n");
goto PARSE_OVER;
}
int stbSize = cJSON_GetArraySize(stables);
if (stbSize > MAX_SUPER_TABLE_COUNT) {
errorPrint(
- "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
- __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
+ "failed to read json, supertable size overflow, max supertable is %d\n",
+ MAX_SUPER_TABLE_COUNT);
goto PARSE_OVER;
}
@@ -4152,8 +4164,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
if (!stbName || stbName->type != cJSON_String
|| stbName->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, stb name not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stb name not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
@@ -4161,7 +4172,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
- printf("ERROR: failed to read json, childtable_prefix not found\n");
+ errorPrint("%s", "failed to read json, childtable_prefix not found\n");
goto PARSE_OVER;
}
tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
@@ -4182,7 +4193,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!autoCreateTbl) {
g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL;
} else {
- printf("ERROR: failed to read json, auto_create_table not found\n");
+ errorPrint("%s", "failed to read json, auto_create_table not found\n");
goto PARSE_OVER;
}
@@ -4192,7 +4203,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!batchCreateTbl) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000;
} else {
- printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
+ errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
@@ -4212,8 +4223,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!childTblExists) {
g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
} else {
- errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, child_table_exists not found\n");
goto PARSE_OVER;
}
@@ -4223,11 +4234,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "failed to read json, childtable_count input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
+ g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source");
if (dataSource && dataSource->type == cJSON_String
@@ -4239,8 +4251,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand",
min(SMALL_BUFF_LEN, strlen("rand") + 1));
} else {
- errorPrint("%s() LN%d, failed to read json, data_source not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, data_source not found\n");
goto PARSE_OVER;
}
@@ -4254,8 +4265,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (0 == strcasecmp(stbIface->valuestring, "stmt")) {
g_Dbs.db[i].superTbls[j].iface= STMT_IFACE;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n",
- __func__, __LINE__, stbIface->valuestring);
+ errorPrint("failed to read json, insert_mode %s not recognized\n",
+ stbIface->valuestring);
goto PARSE_OVER;
}
} else if (!stbIface) {
@@ -4269,7 +4280,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if ((childTbl_limit) && (g_Dbs.db[i].drop != true)
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if (childTbl_limit->type != cJSON_Number) {
- printf("ERROR: failed to read json, childtable_limit\n");
+ errorPrint("%s", "failed to read json, childtable_limit\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint;
@@ -4282,7 +4293,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
&& (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) {
if ((childTbl_offset->type != cJSON_Number)
|| (0 > childTbl_offset->valueint)) {
- printf("ERROR: failed to read json, childtable_offset\n");
+ errorPrint("%s", "failed to read json, childtable_offset\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint;
@@ -4298,7 +4309,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
"now", TSDB_DB_NAME_LEN);
} else {
- printf("ERROR: failed to read json, start_timestamp not found\n");
+ errorPrint("%s", "failed to read json, start_timestamp not found\n");
goto PARSE_OVER;
}
@@ -4308,7 +4319,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!timestampStep) {
g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step;
} else {
- printf("ERROR: failed to read json, timestamp_step not found\n");
+ errorPrint("%s", "failed to read json, timestamp_step not found\n");
goto PARSE_OVER;
}
@@ -4323,7 +4334,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv",
SMALL_BUFF_LEN);
} else {
- printf("ERROR: failed to read json, sample_format not found\n");
+ errorPrint("%s", "failed to read json, sample_format not found\n");
goto PARSE_OVER;
}
@@ -4338,7 +4349,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].sampleFile, 0,
MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, sample_file not found\n");
+ errorPrint("%s", "failed to read json, sample_file not found\n");
goto PARSE_OVER;
}
@@ -4356,7 +4367,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN);
g_Dbs.db[i].superTbls[j].tagSource = 0;
} else {
- printf("ERROR: failed to read json, tags_file not found\n");
+ errorPrint("%s", "failed to read json, tags_file not found\n");
goto PARSE_OVER;
}
@@ -4372,8 +4383,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!maxSqlLen) {
g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
} else {
- errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n");
goto PARSE_OVER;
}
/*
@@ -4390,31 +4400,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!multiThreadWriteOneTbl) {
g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
} else {
- printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n");
+ errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n");
goto PARSE_OVER;
}
*/
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
if (insertRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_rows input mistake\n");
goto PARSE_OVER;
}
cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
if (stbInterlaceRows->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
@@ -4432,8 +4439,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req
} else {
errorPrint(
- "%s() LN%d, failed to read json, interlace rows input mistake\n",
- __func__, __LINE__);
+ "%s", "failed to read json, interlace rows input mistake\n");
goto PARSE_OVER;
}
@@ -4449,7 +4455,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRatio) {
g_Dbs.db[i].superTbls[j].disorderRatio = 0;
} else {
- printf("ERROR: failed to read json, disorderRatio not found\n");
+ errorPrint("%s", "failed to read json, disorderRatio not found\n");
goto PARSE_OVER;
}
@@ -4459,7 +4465,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
} else if (!disorderRange) {
g_Dbs.db[i].superTbls[j].disorderRange = 1000;
} else {
- printf("ERROR: failed to read json, disorderRange not found\n");
+ errorPrint("%s", "failed to read json, disorderRange not found\n");
goto PARSE_OVER;
}
@@ -4467,8 +4473,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
if (insertInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
} else if (!insertInterval) {
@@ -4476,8 +4481,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
- errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, insert_interval input mistake\n");
goto PARSE_OVER;
}
@@ -4509,7 +4513,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!host) {
tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE);
} else {
- printf("ERROR: failed to read json, host not found\n");
+ errorPrint("%s", "failed to read json, host not found\n");
goto PARSE_OVER;
}
@@ -4547,23 +4551,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (!answerPrompt) {
g_args.answer_yes = false;
} else {
- printf("ERROR: failed to read json, confirm_parameter_prompt not found\n");
+ errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n");
goto PARSE_OVER;
}
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
if (gQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s()", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
@@ -4571,7 +4573,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
} else if (!dbs) {
- printf("ERROR: failed to read json, databases not found\n");
+ errorPrint("%s", "failed to read json, databases not found\n");
goto PARSE_OVER;
}
@@ -4585,7 +4587,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.queryMode, "taosc",
min(SMALL_BUFF_LEN, strlen("taosc") + 1));
} else {
- printf("ERROR: failed to read json, query_mode not found\n");
+ errorPrint("%s", "failed to read json, query_mode not found\n");
goto PARSE_OVER;
}
@@ -4595,7 +4597,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, super_table_query not found\n");
+ errorPrint("%s", "failed to read json, super_table_query not found\n");
goto PARSE_OVER;
} else {
cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval");
@@ -4610,8 +4612,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
if (specifiedQueryTimes->valueint <= 0) {
errorPrint(
- "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, specifiedQueryTimes->valueint);
+ "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ specifiedQueryTimes->valueint);
goto PARSE_OVER;
}
@@ -4628,8 +4630,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (concurrent && concurrent->type == cJSON_Number) {
if (concurrent->valueint <= 0) {
errorPrint(
- "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n",
- __func__, __LINE__,
+ "query sqlCount %d or concurrent %d is not correct.\n",
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
@@ -4647,8 +4648,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) {
g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4671,7 +4671,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", restart->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4687,7 +4687,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", keepProgress->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe keepProgress error\n");
+ errorPrint("%s", "failed to read json, subscribe keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4699,15 +4699,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!specifiedSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(specifiedSqls);
if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
> MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
- __func__, __LINE__,
+ errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
superSqlSize,
g_queryInfo.specifiedQueryInfo.concurrent,
MAX_QUERY_SQL_COUNT);
@@ -4721,7 +4719,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) {
- printf("ERROR: failed to read json, sql not found\n");
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
@@ -4761,7 +4759,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
memset(g_queryInfo.specifiedQueryInfo.result[j],
0, MAX_FILE_NAME_LEN);
} else {
- printf("ERROR: failed to read json, super query result file not found\n");
+ errorPrint("%s",
+ "failed to read json, super query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4774,7 +4773,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
- printf("ERROR: failed to read json, sub_table_query not found\n");
+ errorPrint("%s", "failed to read json, sub_table_query not found\n");
ret = true;
goto PARSE_OVER;
} else {
@@ -4788,24 +4787,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
if (superQueryTimes->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
- __func__, __LINE__, superQueryTimes->valueint);
+ errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
+ superQueryTimes->valueint);
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
} else {
- errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, query_times input mistake\n");
goto PARSE_OVER;
}
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
if (threads->valueint <= 0) {
- errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, threads input mistake\n");
goto PARSE_OVER;
}
@@ -4827,8 +4824,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
TSDB_TABLE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, super table name input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super table name input error\n");
goto PARSE_OVER;
}
@@ -4840,8 +4836,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("async", superAsyncMode->valuestring)) {
g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE;
} else {
- errorPrint("%s() LN%d, failed to read json, async mode input error\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, async mode input error\n");
goto PARSE_OVER;
}
} else {
@@ -4851,8 +4846,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
if (superInterval && superInterval->type == cJSON_Number) {
if (superInterval->valueint < 0) {
- errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, interval input mistake\n");
goto PARSE_OVER;
}
g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
@@ -4870,7 +4864,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = false;
} else {
- printf("ERROR: failed to read json, subscribe restart error\n");
+ errorPrint("%s", "failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
@@ -4886,7 +4880,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
} else if (0 == strcmp("no", superkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
} else {
- printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
+ errorPrint("%s",
+ "failed to read json, subscribe super table keepProgress error\n");
goto PARSE_OVER;
}
} else {
@@ -4923,14 +4918,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (!superSqls) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
- errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, super sqls not found\n");
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
- errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
- __func__, __LINE__, MAX_QUERY_SQL_COUNT);
+ errorPrint("failed to read json, query sql size overflow, max is %d\n",
+ MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
@@ -4942,8 +4936,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
if (!sqlStr || sqlStr->type != cJSON_String
|| sqlStr->valuestring == NULL) {
- errorPrint("%s() LN%d, failed to read json, sql not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
@@ -4951,14 +4944,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
- && result->valuestring != NULL){
+ && result->valuestring != NULL) {
tstrncpy(g_queryInfo.superQueryInfo.result[j],
result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
} else {
- errorPrint("%s() LN%d, failed to read json, sub query result file not found\n",
- __func__, __LINE__);
+ errorPrint("%s", "failed to read json, sub query result file not found\n");
goto PARSE_OVER;
}
}
@@ -4976,7 +4968,7 @@ static bool getInfoFromJsonFile(char* file) {
FILE *fp = fopen(file, "r");
if (!fp) {
- printf("failed to read %s, reason:%s\n", file, strerror(errno));
+ errorPrint("failed to read %s, reason:%s\n", file, strerror(errno));
return false;
}
@@ -4987,14 +4979,14 @@ static bool getInfoFromJsonFile(char* file) {
if (len <= 0) {
free(content);
fclose(fp);
- printf("failed to read %s, content is null", file);
+ errorPrint("failed to read %s, content is null", file);
return false;
}
content[len] = 0;
cJSON* root = cJSON_Parse(content);
if (root == NULL) {
- printf("ERROR: failed to cjson parse %s, invalid json format\n", file);
+ errorPrint("failed to cjson parse %s, invalid json format\n", file);
goto PARSE_OVER;
}
@@ -5007,13 +4999,13 @@ static bool getInfoFromJsonFile(char* file) {
} else if (0 == strcasecmp("subscribe", filetype->valuestring)) {
g_args.test_mode = SUBSCRIBE_TEST;
} else {
- printf("ERROR: failed to read json, filetype not support\n");
+ errorPrint("%s", "failed to read json, filetype not support\n");
goto PARSE_OVER;
}
} else if (!filetype) {
g_args.test_mode = INSERT_TEST;
} else {
- printf("ERROR: failed to read json, filetype not found\n");
+ errorPrint("%s", "failed to read json, filetype not found\n");
goto PARSE_OVER;
}
@@ -5023,8 +5015,8 @@ static bool getInfoFromJsonFile(char* file) {
|| (SUBSCRIBE_TEST == g_args.test_mode)) {
ret = getMetaFromQueryJsonFile(root);
} else {
- errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n",
- __func__, __LINE__);
+ errorPrint("%s",
+ "input json file type error! please input correct file type: insert or query or subscribe\n");
goto PARSE_OVER;
}
@@ -5142,7 +5134,7 @@ static int64_t generateStbRowData(
|| (0 == strncasecmp(stbInfo->columns[i].dataType,
"NCHAR", 5))) {
if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary or nchar length overflow, max size:%u\n",
+ errorPrint2("binary or nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5154,7 +5146,7 @@ static int64_t generateStbRowData(
}
char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1);
if (NULL == buf) {
- errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
+ errorPrint2("calloc failed! size:%d\n", stbInfo->columns[i].dataLen);
return -1;
}
rand_string(buf, stbInfo->columns[i].dataLen);
@@ -5199,7 +5191,8 @@ static int64_t generateStbRowData(
"SMALLINT", 8)) {
tmp = rand_smallint_str();
tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN));
+ tstrncpy(pstr + dataLen, tmp,
+ min(tmpLen + 1, SMALLINT_BUFF_LEN));
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"TINYINT", 7)) {
tmp = rand_tinyint_str();
@@ -5212,11 +5205,11 @@ static int64_t generateStbRowData(
tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN));
} else if (0 == strncasecmp(stbInfo->columns[i].dataType,
"TIMESTAMP", 9)) {
- tmp = rand_int_str();
+ tmp = rand_bigint_str();
tmpLen = strlen(tmp);
- tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN));
+ tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN));
} else {
- errorPrint( "Not support data type: %s\n",
+ errorPrint2("Not support data type: %s\n",
stbInfo->columns[i].dataType);
return -1;
}
@@ -5242,7 +5235,7 @@ static int64_t generateData(char *recBuf, char **data_type,
int64_t timestamp, int lenOfBinary) {
memset(recBuf, 0, MAX_DATA_SIZE);
char *pstr = recBuf;
- pstr += sprintf(pstr, "(%" PRId64, timestamp);
+ pstr += sprintf(pstr, "(%"PRId64"", timestamp);
int columnCount = g_args.num_of_CPR;
@@ -5254,9 +5247,9 @@ static int64_t generateData(char *recBuf, char **data_type,
} else if (strcasecmp(data_type[i % columnCount], "INT") == 0) {
pstr += sprintf(pstr, ",%d", rand_int());
} else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
} else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) {
- pstr += sprintf(pstr, ",%" PRId64, rand_bigint());
+ pstr += sprintf(pstr, ",%"PRId64"", rand_bigint());
} else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) {
pstr += sprintf(pstr, ",%10.4f", rand_float());
} else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) {
@@ -5268,7 +5261,7 @@ static int64_t generateData(char *recBuf, char **data_type,
} else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) {
char *s = malloc(lenOfBinary + 1);
if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
__func__, __LINE__, lenOfBinary + 1);
exit(EXIT_FAILURE);
}
@@ -5278,7 +5271,7 @@ static int64_t generateData(char *recBuf, char **data_type,
} else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) {
char *s = malloc(lenOfBinary + 1);
if (s == NULL) {
- errorPrint("%s() LN%d, memory allocation %d bytes failed\n",
+ errorPrint2("%s() LN%d, memory allocation %d bytes failed\n",
__func__, __LINE__, lenOfBinary + 1);
exit(EXIT_FAILURE);
}
@@ -5305,7 +5298,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) {
sampleDataBuf = calloc(
stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
strerror(errno));
@@ -5316,7 +5309,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) {
int ret = readSampleFromCsvFileToMem(stbInfo);
if (0 != ret) {
- errorPrint("%s() LN%d, read sample from csv file failed.\n",
+ errorPrint2("%s() LN%d, read sample from csv file failed.\n",
__func__, __LINE__);
tmfree(sampleDataBuf);
stbInfo->sampleDataBuf = NULL;
@@ -5371,7 +5364,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
debugPrint("%s() LN%d, stmt=%p",
__func__, __LINE__, pThreadInfo->stmt);
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
- errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n",
+ errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt));
fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. ===\033[0m\n\n");
@@ -5381,7 +5374,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
break;
default:
- errorPrint("%s() LN%d: unknown insert mode: %d\n",
+ errorPrint2("%s() LN%d: unknown insert mode: %d\n",
__func__, __LINE__, stbInfo->iface);
affectedRows = 0;
}
@@ -5609,7 +5602,7 @@ static int generateStbSQLHead(
tableSeq % stbInfo->tagSampleCount);
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -5760,7 +5753,7 @@ static int32_t prepareStmtBindArrayByType(
if (0 == strncasecmp(dataType,
"BINARY", strlen("BINARY"))) {
if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary length overflow, max size:%u\n",
+ errorPrint2("binary length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5783,7 +5776,7 @@ static int32_t prepareStmtBindArrayByType(
} else if (0 == strncasecmp(dataType,
"NCHAR", strlen("NCHAR"))) {
if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "nchar length overflow, max size:%u\n",
+ errorPrint2("nchar length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5931,7 +5924,7 @@ static int32_t prepareStmtBindArrayByType(
value, &tmpEpoch, strlen(value),
timePrec, 0)) {
free(bind_ts2);
- errorPrint("Input %s, time format error!\n", value);
+ errorPrint2("Input %s, time format error!\n", value);
return -1;
}
*bind_ts2 = tmpEpoch;
@@ -5947,7 +5940,7 @@ static int32_t prepareStmtBindArrayByType(
bind->length = &bind->buffer_length;
bind->is_null = NULL;
} else {
- errorPrint( "No support data type: %s\n", dataType);
+ errorPrint2("Not support data type: %s\n", dataType);
return -1;
}
@@ -5964,7 +5957,7 @@ static int32_t prepareStmtBindArrayByTypeForRand(
if (0 == strncasecmp(dataType,
"BINARY", strlen("BINARY"))) {
if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "binary length overflow, max size:%u\n",
+ errorPrint2("binary length overflow, max size:%u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -5987,7 +5980,7 @@ static int32_t prepareStmtBindArrayByTypeForRand(
} else if (0 == strncasecmp(dataType,
"NCHAR", strlen("NCHAR"))) {
if (dataLen > TSDB_MAX_BINARY_LEN) {
- errorPrint( "nchar length overflow, max size:%u\n",
+ errorPrint2("nchar length overflow, max size: %u\n",
(uint32_t)TSDB_MAX_BINARY_LEN);
return -1;
}
@@ -6139,7 +6132,7 @@ static int32_t prepareStmtBindArrayByTypeForRand(
if (TSDB_CODE_SUCCESS != taosParseTime(
value, &tmpEpoch, strlen(value),
timePrec, 0)) {
- errorPrint("Input %s, time format error!\n", value);
+ errorPrint2("Input %s, time format error!\n", value);
return -1;
}
*bind_ts2 = tmpEpoch;
@@ -6157,7 +6150,7 @@ static int32_t prepareStmtBindArrayByTypeForRand(
*ptr += bind->buffer_length;
} else {
- errorPrint( "No support data type: %s\n", dataType);
+ errorPrint2("No support data type: %s\n", dataType);
return -1;
}
@@ -6175,7 +6168,7 @@ static int32_t prepareStmtWithoutStb(
TAOS_STMT *stmt = pThreadInfo->stmt;
int ret = taos_stmt_set_tbname(stmt, tableName);
if (ret != 0) {
- errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
+ errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n",
tableName, ret, taos_stmt_errstr(stmt));
return ret;
}
@@ -6184,7 +6177,7 @@ static int32_t prepareStmtWithoutStb(
char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1));
if (bindArray == NULL) {
- errorPrint("Failed to allocate %d bind params\n",
+ errorPrint2("Failed to allocate %d bind params\n",
(g_args.num_of_CPR + 1));
return -1;
}
@@ -6225,13 +6218,13 @@ static int32_t prepareStmtWithoutStb(
}
}
if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
// if msg > 3MB, break
if (0 != taos_stmt_add_batch(stmt)) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
break;
}
@@ -6254,7 +6247,7 @@ static int32_t prepareStbStmtBindTag(
{
char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
__func__, __LINE__, DOUBLE_BUFF_LEN);
return -1;
}
@@ -6286,7 +6279,7 @@ static int32_t prepareStbStmtBindRand(
{
char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary);
if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
__func__, __LINE__, DOUBLE_BUFF_LEN);
return -1;
}
@@ -6389,7 +6382,7 @@ static int32_t prepareStbStmtRand(
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6397,7 +6390,7 @@ static int32_t prepareStbStmtRand(
char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
if (NULL == tagsArray) {
tmfree(tagsValBuf);
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6416,14 +6409,14 @@ static int32_t prepareStbStmtRand(
tmfree(tagsArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
} else {
ret = taos_stmt_set_tbname(stmt, tableName);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6431,7 +6424,7 @@ static int32_t prepareStbStmtRand(
char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
__func__, __LINE__, (stbInfo->columnCount + 1));
return -1;
}
@@ -6450,7 +6443,7 @@ static int32_t prepareStbStmtRand(
}
ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6458,7 +6451,7 @@ static int32_t prepareStbStmtRand(
// if msg > 3MB, break
ret = taos_stmt_add_batch(stmt);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
free(bindArray);
return -1;
@@ -6502,7 +6495,7 @@ static int32_t prepareStbStmtWithSample(
}
if (NULL == tagsValBuf) {
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6510,7 +6503,7 @@ static int32_t prepareStbStmtWithSample(
char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount);
if (NULL == tagsArray) {
tmfree(tagsValBuf);
- errorPrint("%s() LN%d, tag buf failed to allocate memory\n",
+ errorPrint2("%s() LN%d, tag buf failed to allocate memory\n",
__func__, __LINE__);
return -1;
}
@@ -6529,14 +6522,14 @@ static int32_t prepareStbStmtWithSample(
tmfree(tagsArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
} else {
ret = taos_stmt_set_tbname(stmt, tableName);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6558,14 +6551,14 @@ static int32_t prepareStbStmtWithSample(
}
ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
// if msg > 3MB, break
ret = taos_stmt_add_batch(stmt);
if (0 != ret) {
- errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
+ errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n",
__func__, __LINE__, taos_stmt_errstr(stmt));
return -1;
}
@@ -6726,7 +6719,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -6774,7 +6767,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -6841,7 +6834,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
- errorPrint("[%d] %s() LN%d, generated records is %d\n",
+ errorPrint2("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
goto free_of_interlace;
} else if (generated == 0) {
@@ -6895,7 +6888,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampUs();
if (recOfBatch == 0) {
- errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n",
+ errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n",
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl);
if (batchPerTbl > 0) {
@@ -6922,7 +6915,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (recOfBatch != affectedRows) {
- errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
+ errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, pThreadInfo->buffer);
goto free_of_interlace;
@@ -6980,7 +6973,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
- errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
+ errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
@@ -7021,7 +7014,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
__func__, __LINE__,
pThreadInfo->threadID, tableSeq, tableName);
if (0 == strlen(tableName)) {
- errorPrint("[%d] %s() LN%d, getTableName return null\n",
+ errorPrint2("[%d] %s() LN%d, getTableName return null\n",
pThreadInfo->threadID, __func__, __LINE__);
free(pThreadInfo->buffer);
return NULL;
@@ -7110,7 +7103,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (affectedRows < 0) {
- errorPrint("%s() LN%d, affected rows: %d\n",
+ errorPrint2("%s() LN%d, affected rows: %d\n",
__func__, __LINE__, affectedRows);
goto free_of_progressive;
}
@@ -7272,7 +7265,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
uint16_t rest_port = port + TSDB_PORT_HTTP;
struct hostent *server = gethostbyname(host);
if ((server == NULL) || (server->h_addr == NULL)) {
- errorPrint("%s", "ERROR, no such host");
+ errorPrint2("%s", "no such host");
return -1;
}
@@ -7297,7 +7290,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec)
{
stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE);
if (stbInfo->sampleBindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
+ errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n",
__func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE);
return -1;
}
@@ -7306,7 +7299,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec)
for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) {
char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind params\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind params\n",
__func__, __LINE__, (stbInfo->columnCount + 1));
return -1;
}
@@ -7338,7 +7331,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec)
char *bindBuffer = calloc(1, index + 1);
if (bindBuffer == NULL) {
- errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n",
+ errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n",
__func__, __LINE__, DOUBLE_BUFF_LEN);
return -1;
}
@@ -7376,7 +7369,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
} else if (0 == strncasecmp(precision, "ns", 2)) {
timePrec = TSDB_TIME_PRECISION_NANO;
} else {
- errorPrint("Not support precision: %s\n", precision);
+ errorPrint2("Not support precision: %s\n", precision);
exit(EXIT_FAILURE);
}
}
@@ -7406,7 +7399,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource,
"sample", strlen("sample")))) {
if (0 != prepareSampleDataForSTable(stbInfo)) {
- errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
+ errorPrint2("%s() LN%d, prepare sample data for stable failed!\n",
__func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -7416,7 +7409,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == taos0) {
- errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
+ errorPrint2("%s() LN%d, connect to server fail , reason: %s\n",
__func__, __LINE__, taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -7471,7 +7464,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
limit * TSDB_TABLE_NAME_LEN);
if (stbInfo->childTblName == NULL) {
taos_close(taos0);
- errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -7577,7 +7570,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == pThreadInfo->taos) {
free(infos);
- errorPrint(
+ errorPrint2(
"%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
@@ -7593,7 +7586,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if (NULL == pThreadInfo->stmt) {
free(pids);
free(infos);
- errorPrint(
+ errorPrint2(
"%s() LN%d, failed init stmt, reason: %s\n",
__func__, __LINE__,
taos_errstr(NULL));
@@ -7601,11 +7594,11 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0);
- if (ret != 0){
+ if (ret != 0) {
free(pids);
free(infos);
free(stmtBuffer);
- errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
+ errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
ret, taos_stmt_errstr(pThreadInfo->stmt));
exit(EXIT_FAILURE);
}
@@ -7749,7 +7742,7 @@ static void *readTable(void *sarg) {
char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
- errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
+ errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
free(command);
return NULL;
}
@@ -7785,7 +7778,7 @@ static void *readTable(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
@@ -7867,7 +7860,7 @@ static void *readMetric(void *sarg) {
int32_t code = taos_errno(pSql);
if (code != 0) {
- errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
+ errorPrint2("Failed to query:%s\n", taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
fclose(fp);
@@ -7914,7 +7907,7 @@ static int insertTestProcess() {
debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile);
g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
if (NULL == g_fpOfInsertResult) {
- errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile);
+ errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile);
return -1;
}
@@ -7947,18 +7940,30 @@ static int insertTestProcess() {
double start;
double end;
- // create child tables
- start = taosGetTimestampMs();
- createChildTables();
- end = taosGetTimestampMs();
-
if (g_totalChildTables > 0) {
- fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ fprintf(stderr,
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountByCreateTbl);
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
- "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
- (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ "creating %"PRId64" table(s) with %d thread(s)\n\n",
+ g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ }
+
+ // create child tables
+ start = taosGetTimestampMs();
+ createChildTables();
+ end = taosGetTimestampMs();
+
+ fprintf(stderr,
+ "Spent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountByCreateTbl, g_actualChildTables);
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n",
+ (end - start)/1000.0, g_totalChildTables,
+ g_Dbs.threadCountByCreateTbl, g_actualChildTables);
}
}
@@ -8016,7 +8021,7 @@ static void *specifiedTableQuery(void *sarg) {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
@@ -8028,7 +8033,7 @@ static void *specifiedTableQuery(void *sarg) {
sprintf(sqlStr, "use %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint("use database %s failed!\n\n",
g_queryInfo.dbName);
return NULL;
}
@@ -8069,7 +8074,7 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t currentPrintTime = taosGetTimestampMs();
uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n",
__func__, __LINE__, endTs, startTs);
printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
@@ -8194,7 +8199,7 @@ static int queryTestProcess() {
NULL,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -8252,7 +8257,7 @@ static int queryTestProcess() {
taos_close(taos);
free(infos);
free(pids);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
return -1;
}
@@ -8350,7 +8355,7 @@ static int queryTestProcess() {
static void stable_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
@@ -8363,7 +8368,7 @@ static void stable_sub_callback(
static void specified_sub_callback(
TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
if (res == NULL || taos_errno(res) != 0) {
- errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
+ errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n",
__func__, __LINE__, code, taos_errstr(res));
return;
}
@@ -8402,7 +8407,7 @@ static TAOS_SUB* subscribeImpl(
}
if (tsub == NULL) {
- errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
+ errorPrint2("failed to create subscription. topic:%s, sql:%s\n", topic, sql);
return NULL;
}
@@ -8433,7 +8438,7 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
free(subSqlStr);
return NULL;
@@ -8444,7 +8449,7 @@ static void *superSubscribe(void *sarg) {
sprintf(sqlStr, "USE %s", g_queryInfo.dbName);
if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
taos_close(pThreadInfo->taos);
- errorPrint( "use database %s failed!\n\n",
+ errorPrint2("use database %s failed!\n\n",
g_queryInfo.dbName);
free(subSqlStr);
return NULL;
@@ -8580,7 +8585,7 @@ static void *specifiedSubscribe(void *sarg) {
g_queryInfo.dbName,
g_queryInfo.port);
if (pThreadInfo->taos == NULL) {
- errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
}
@@ -8687,7 +8692,7 @@ static int subscribeTestProcess() {
g_queryInfo.dbName,
g_queryInfo.port);
if (taos == NULL) {
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -8715,7 +8720,7 @@ static int subscribeTestProcess() {
g_queryInfo.specifiedQueryInfo.sqlCount);
} else {
if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, sepcified query sqlCount %d.\n",
+ errorPrint2("%s() LN%d, specified query sqlCount %d.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount);
exit(EXIT_FAILURE);
@@ -8732,7 +8737,7 @@ static int subscribeTestProcess() {
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
+ errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__);
exit(EXIT_FAILURE);
}
@@ -8767,7 +8772,7 @@ static int subscribeTestProcess() {
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
if ((NULL == pidsOfStable) || (NULL == infosOfStable)) {
- errorPrint("%s() LN%d, malloc failed for create threads\n",
+ errorPrint2("%s() LN%d, malloc failed for create threads\n",
__func__, __LINE__);
// taos_close(taos);
exit(EXIT_FAILURE);
@@ -9033,7 +9038,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
memcpy(cmd + cmd_len, line, read_len);
if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
- errorPrint("%s() LN%d, queryDbExec %s failed!\n",
+ errorPrint2("%s() LN%d, queryDbExec %s failed!\n",
__func__, __LINE__, cmd);
tmfree(cmd);
tmfree(line);
@@ -9107,7 +9112,7 @@ static void queryResult() {
g_Dbs.port);
if (pThreadInfo->taos == NULL) {
free(pThreadInfo);
- errorPrint( "Failed to connect to TDengine, reason:%s\n",
+ errorPrint2("Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
exit(EXIT_FAILURE);
}
@@ -9129,7 +9134,7 @@ static void testCmdLine() {
if (strlen(configDir)) {
wordexp_t full_path;
if (wordexp(configDir, &full_path, 0) != 0) {
- errorPrint( "Invalid path %s\n", configDir);
+ errorPrint("Invalid path %s\n", configDir);
return;
}
taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]);
diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c
index 33d779dfcf..ddb9e660af 100644
--- a/src/kit/taospack/taospack.c
+++ b/src/kit/taospack/taospack.c
@@ -18,6 +18,7 @@
#include
#include
+
#if defined(WINDOWS)
int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3 for windows.\n");
@@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
if ( ++fi == malloc_cnt ) {
malloc_cnt += 100000;
- floats = realloc(floats, malloc_cnt*sizeof(float));
+ float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
+ if(floats1 == NULL)
+ break;
+ floats = floats1;
}
memset(buf, 0, sizeof(buf));
}
@@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){
}
-
void unitTestFloat() {
float ft1 [] = {1.11, 2.22, 3.333};
@@ -662,7 +665,50 @@ void unitTestFloat() {
free(ft2);
free(buff);
free(output);
-
+}
+
+void leakFloat() {
+
+ int cnt = sizeof(g_ft1)/sizeof(float);
+ float* floats = g_ft1;
+ int algorithm = 2;
+
+ // compress
+ const char* input = (const char*)floats;
+ int input_len = cnt * sizeof(float);
+ int output_len = input_len + 1024;
+ char* output = (char*) malloc(output_len);
+ char* buff = (char*) malloc(input_len);
+ int buff_len = input_len;
+
+ int ret_len = 0;
+ ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
+
+ if(ret_len == 0) {
+ printf(" compress float error.\n");
+ free(buff);
+ free(output);
+ return ;
+ }
+
+ float* ft2 = (float*)malloc(input_len);
+ ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
+ if(ret_len == 0) {
+ printf(" decompress float error.\n");
+ }
+
+ free(ft2);
+ free(buff);
+ free(output);
+}
+
+
+void leakTest(){
+ for(long long i=0; i< 90000000000000LL; i++){
+ if(i%10000==0)
+ printf(" ---------- %lld ---------------- \n", i);
+ leakFloat();
+ }
}
#define DB_CNT 500
@@ -689,7 +735,7 @@ extern char Compressor [];
// ----------------- main ----------------------
//
int main(int argc, char *argv[]) {
- printf("welcome to use taospack tools v1.3\n");
+ printf("welcome to use taospack tools v1.6\n");
//printf(" sizeof(int)=%d\n", (int)sizeof(int));
//printf(" sizeof(long)=%d\n", (int)sizeof(long));
@@ -753,6 +799,9 @@ int main(int argc, char *argv[]) {
if(strcmp(argv[1], "-mem") == 0) {
memTest();
}
+ else if(strcmp(argv[1], "-leak") == 0) {
+ leakTest();
+ }
}
else{
unitTestFloat();
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 7a0108026e..a8a2615672 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -2934,10 +2934,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2;
}
- pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
- if (pMultiMeta == NULL) {
+ SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
+ if (pMultiMeta1 == NULL) {
return NULL;
}
+ pMultiMeta = pMultiMeta1;
}
return pMultiMeta;
diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c
index d8194feab4..22954f1523 100644
--- a/src/os/src/detail/osMemory.c
+++ b/src/os/src/detail/osMemory.c
@@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
void * tptr = (void *)((char *)ptr - sizeof(size_t));
size_t tsize = size + sizeof(size_t);
- tptr = realloc(tptr, tsize);
- if (tptr == NULL) return NULL;
+ void* tptr1 = realloc(tptr, tsize);
+ if (tptr1 == NULL) return NULL;
+ tptr = tptr1;
*(size_t *)tptr = size;
diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c
index 553aecaf0a..aa45854884 100644
--- a/src/os/src/windows/wGetline.c
+++ b/src/os/src/windows/wGetline.c
@@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
*n += MIN_CHUNK;
nchars_avail = (int32_t)(*n + *lineptr - read_pos);
- *lineptr = realloc(*lineptr, *n);
- if (!*lineptr) {
+ char* lineptr1 = realloc(*lineptr, *n);
+ if (!lineptr1) {
errno = ENOMEM;
return -1;
}
+ *lineptr = lineptr1;
+
read_pos = *n - nchars_avail + *lineptr;
assert((*lineptr + *n) == (read_pos + nchars_avail));
}
diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h
index 0a5822b908..99a5b770aa 100644
--- a/src/plugins/http/inc/httpInt.h
+++ b/src/plugins/http/inc/httpInt.h
@@ -150,6 +150,7 @@ typedef struct HttpContext {
char ipstr[22];
char user[TSDB_USER_LEN]; // parsed from auth token or login message
char pass[HTTP_PASSWORD_LEN];
+ char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
TAOS * taos;
void * ppContext;
HttpSession *session;
diff --git a/src/plugins/http/inc/httpRestHandle.h b/src/plugins/http/inc/httpRestHandle.h
index 632a1dc647..df405685e9 100644
--- a/src/plugins/http/inc/httpRestHandle.h
+++ b/src/plugins/http/inc/httpRestHandle.h
@@ -22,12 +22,12 @@
#include "httpResp.h"
#include "httpSql.h"
-#define REST_ROOT_URL_POS 0
-#define REST_ACTION_URL_POS 1
-#define REST_USER_URL_POS 2
-#define REST_PASS_URL_POS 3
+#define REST_ROOT_URL_POS 0
+#define REST_ACTION_URL_POS 1
+#define REST_USER_USEDB_URL_POS 2
+#define REST_PASS_URL_POS 3
void restInitHandle(HttpServer* pServer);
bool restProcessRequest(struct HttpContext* pContext);
-#endif
\ No newline at end of file
+#endif
diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c
index a285670d20..a029adec0c 100644
--- a/src/plugins/http/src/httpRestHandle.c
+++ b/src/plugins/http/src/httpRestHandle.c
@@ -62,11 +62,11 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser;
- if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) {
+ if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) {
return false;
}
- tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN);
+ tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN);
return true;
}
@@ -107,6 +107,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
HttpSqlCmd* cmd = &(pContext->singleCmd);
cmd->nativSql = sql;
+ /* find if there is db_name in url */
+ pContext->db[0] = '\0';
+
+ HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS];
+ if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') &&
+ (sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' '))
+ {
+ snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str);
+ }
+
pContext->reqType = HTTP_REQTYPE_SINGLE_SQL;
if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) {
pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod;
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index c2e723732a..0dd451f72d 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) {
&(pContext->taos));
httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
pContext->taos);
+
+ if (pContext->taos != NULL) {
+ STscObj *pObj = pContext->taos;
+ pObj->from = TAOS_REQ_FROM_HTTP;
+ }
} else {
httpExecCmd(pContext);
}
diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h
index b5ea9932b9..d4a9ed0cbc 100644
--- a/src/query/inc/qExtbuffer.h
+++ b/src/query/inc/qExtbuffer.h
@@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
void tOrderDescDestroy(tOrderDescriptor *pDesc);
+void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
+
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
int32_t numOfRowsToWrite, int32_t srcCapacity);
diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h
index 5c48c43c45..87a221943a 100644
--- a/src/query/inc/queryLog.h
+++ b/src/query/inc/queryLog.h
@@ -24,10 +24,10 @@ extern "C" {
extern uint32_t qDebugFlag;
-#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0)
-#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0)
-#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0)
-#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0)
+#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0)
+#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0)
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index c19628eb37..4078ea74ea 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
return;
}
+ bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
+
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
} else if (type == TSDB_FILL_NULL) {
@@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
} else if (type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
} else {
- if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
+ if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
if (type == TSDB_FILL_PREV) {
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
@@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY skey = GET_TS_DATA(pCtx, 0);
if (type == TSDB_FILL_PREV) {
- if (skey > pCtx->startTs) {
+ if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
return;
}
if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
- if (ekey > skey && ekey <= pCtx->startTs) {
+ if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
+ ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
skey = ekey;
}
}
@@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = skey;
char* val = NULL;
- if (ekey < pCtx->startTs) {
+ if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1);
- if (ekey < pCtx->startTs) {
+ if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
return;
}
@@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
- if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
+ if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
+ || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
return;
}
- assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
-
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
@@ -3788,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly
if (pCtx->size > 0) {
- // impose the timestamp check
- TSKEY key = GET_TS_DATA(pCtx, 0);
+ bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
+ TSKEY key;
+ char *pData;
+ int32_t typedData = 0;
+
+ if (ascQuery) {
+ key = GET_TS_DATA(pCtx, 0);
+ pData = GET_INPUT_DATA(pCtx, 0);
+ } else {
+ key = pCtx->start.key;
+ if (key == INT64_MIN) {
+ key = GET_TS_DATA(pCtx, 0);
+ pData = GET_INPUT_DATA(pCtx, 0);
+ } else {
+ if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
+ pData = pCtx->start.ptr;
+ } else {
+ typedData = 1;
+ pData = (char *)&pCtx->start.val;
+ }
+ }
+ }
+
+ //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
if (key == pCtx->startTs) {
- char *pData = GET_INPUT_DATA(pCtx, 0);
- assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
+ if (typedData) {
+ SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
+ } else {
+ assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
+ }
+
SET_VAL(pCtx, 1, 1);
} else {
interp_function_impl(pCtx);
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index e745ef74f5..dad0e544f1 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -45,7 +45,7 @@
#define MULTI_KEY_DELIM "-"
-#define HASH_CAPACITY_LIMIT 10000000
+#define HASH_CAPACITY_LIMIT 10000000
#define TIME_WINDOW_COPY(_dst, _src) do {\
(_dst).skey = (_src).skey;\
@@ -1327,6 +1327,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = curTs;
pCtx[k].end.val = v2;
+
+ if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
+ if (prevRowIndex == -1) {
+ pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
+ } else {
+ pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
+ }
+
+ pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
+ }
}
} else if (functionId == TSDB_FUNC_TWA) {
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
@@ -1596,6 +1606,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL;
int32_t forwardStep = 0;
int32_t ret = 0;
+ STimeWindow preWin = win;
while (1) {
// null data, failed to allocate more memory buffer
@@ -1610,12 +1621,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
- doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
-
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ preWin = win;
+
int32_t prevEndPos = (forwardStep - 1) * step + startPos;
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
if (startPos < 0) {
- if (win.skey <= pQueryAttr->window.ekey) {
+ if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
@@ -1626,7 +1638,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
- doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+ doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
}
break;
@@ -3570,7 +3582,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
int64_t tid = 0;
- pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
+ pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
@@ -7156,14 +7168,14 @@ static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* p
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j);
if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) {
SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes};
- taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
+ taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
}
}
}
pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput);
pInfo->buf = calloc(1, pInfo->totalBytes);
return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false;
-}
+}
static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) {
char *p = pInfo->buf;
@@ -7188,11 +7200,13 @@ static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBl
p += strlen(MULTI_KEY_DELIM);
}
}
+
static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
+
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
@@ -7247,11 +7261,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pRes->info.rows += 1;
}
}
+
if (pRes->info.rows >= pInfo->threshold) {
break;
}
}
-
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}
diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c
index 9f9347b327..5994099a0d 100644
--- a/src/query/src/qExtbuffer.c
+++ b/src/query/src/qExtbuffer.c
@@ -768,60 +768,6 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
free(buf);
}
-void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
- assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
-
- int32_t bytes = pSchema[index].bytes;
- int32_t size = bytes + sizeof(int32_t);
-
- char* buf = calloc(1, size * numOfRows);
-
- for(int32_t i = 0; i < numOfRows; ++i) {
- char* dest = buf + size * i;
- memcpy(dest, ((char*)pCols[index]) + bytes * i, bytes);
- *(int32_t*)(dest+bytes) = i;
- }
-
- qsort(buf, numOfRows, size, compareFn);
-
- int32_t prevLength = 0;
- char* p = NULL;
-
- for(int32_t i = 0; i < numOfCols; ++i) {
- int32_t bytes1 = pSchema[i].bytes;
-
- if (i == index) {
- for(int32_t j = 0; j < numOfRows; ++j){
- char* src = buf + (j * size);
- char* dest = (char*) pCols[i] + (j * bytes1);
- memcpy(dest, src, bytes1);
- }
- } else {
- // make sure memory buffer is enough
- if (prevLength < bytes1) {
- char *tmp = realloc(p, bytes1 * numOfRows);
- assert(tmp);
-
- p = tmp;
- prevLength = bytes1;
- }
-
- memcpy(p, pCols[i], bytes1 * numOfRows);
-
- for(int32_t j = 0; j < numOfRows; ++j){
- char* dest = (char*) pCols[i] + bytes1 * j;
-
- int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
- char* src = p + (newPos * bytes1);
- memcpy(dest, src, bytes1);
- }
- }
- }
-
- tfree(buf);
- tfree(p);
-}
-
/*
* deep copy of sschema
*/
@@ -1157,3 +1103,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
destroyColumnModel(pDesc->pColumnModel);
tfree(pDesc);
}
+
+void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
+ assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
+
+ int32_t bytes = pSchema[index].bytes;
+ int32_t size = bytes + sizeof(int32_t);
+
+ char* buf = calloc(1, size * numOfRows);
+
+ for(int32_t i = 0; i < numOfRows; ++i) {
+ char* dest = buf + size * i;
+ memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
+ *(int32_t*)(dest+bytes) = i;
+ }
+
+ qsort(buf, numOfRows, size, compareFn);
+
+ int32_t prevLength = 0;
+ char* p = NULL;
+
+ for(int32_t i = 0; i < numOfCols; ++i) {
+ int32_t bytes1 = pSchema[i].bytes;
+
+ if (i == index) {
+ for(int32_t j = 0; j < numOfRows; ++j){
+ char* src = buf + (j * size);
+ char* dest = ((char*)pCols[i]) + (j * bytes1);
+ memcpy(dest, src, bytes1);
+ }
+ } else {
+ // make sure memory buffer is enough
+ if (prevLength < bytes1) {
+ char *tmp = realloc(p, bytes1 * numOfRows);
+ assert(tmp);
+
+ p = tmp;
+ prevLength = bytes1;
+ }
+
+ memcpy(p, pCols[i], bytes1 * numOfRows);
+
+ for(int32_t j = 0; j < numOfRows; ++j){
+ char* dest = ((char*)pCols[i]) + bytes1 * j;
+
+ int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
+ char* src = p + (newPos * bytes1);
+ memcpy(dest, src, bytes1);
+ }
+ }
+ }
+
+ tfree(buf);
+ tfree(p);
+}
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index f72f70c911..1988fc9df7 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -698,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
// fill operator
- if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
+ if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
op = OP_Fill;
taosArrayPush(plan, &op);
}
diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c
index 825b7960de..4cf05dd2c7 100644
--- a/src/query/src/qTsbuf.c
+++ b/src/query/src/qTsbuf.c
@@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
static void shrinkBuffer(STSList* ptsData) {
// shrink tmp buffer size if it consumes too many memory compared to the pre-defined size
if (ptsData->allocSize >= ptsData->threshold * 2) {
- ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
- ptsData->allocSize = MEM_BUF_SIZE;
+ char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
+ if(rawBuf) {
+ ptsData->rawBuf = rawBuf;
+ ptsData->allocSize = MEM_BUF_SIZE;
+ }
}
}
diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h
index 3b6b6449f6..e89e10f766 100644
--- a/src/tsdb/inc/tsdbFS.h
+++ b/src/tsdb/inc/tsdbFS.h
@@ -18,6 +18,9 @@
#define TSDB_FS_VERSION 0
+// ================== TSDB global config
+extern bool tsdbForceKeepFile;
+
// ================== CURRENT file header info
typedef struct {
uint32_t version; // Current file system version (relating to code)
@@ -110,4 +113,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) {
return 0;
}
-#endif /* _TD_TSDB_FS_H_ */
\ No newline at end of file
+#endif /* _TD_TSDB_FS_H_ */
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index a3d6c59f72..a40e67ca59 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -37,6 +37,7 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo);
+// For backward compatibility
// ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 9cc9b7224c..870e343090 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int32_t numOfColsOfRow1 = 0;
if (pSchema1 == NULL) {
- pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1));
+ pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
}
if(isRow1DataRow) {
numOfColsOfRow1 = schemaNCols(pSchema1);
@@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
if(row2) {
isRow2DataRow = isDataRow(row2);
if (pSchema2 == NULL) {
- pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2));
+ pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
}
if(isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2);
@@ -3480,6 +3480,7 @@ void filterPrepare(void* expr, void* param) {
SArray *arr = (SArray *)(pCond->arr);
for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
char* p = taosArrayGetP(arr, i);
+ strtolower(varDataVal(p), varDataVal(p));
taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy));
}
} else {
diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h
index d1760ab28c..e29015c7cb 100644
--- a/src/util/inc/tcompare.h
+++ b/src/util/inc/tcompare.h
@@ -22,10 +22,10 @@ extern "C" {
#include "os.h"
-#define TSDB_PATTERN_MATCH 0
-#define TSDB_PATTERN_NOMATCH 1
-#define TSDB_PATTERN_NOWILDCARDMATCH 2
-#define TSDB_PATTERN_STRING_MAX_LEN 100
+#define TSDB_PATTERN_MATCH 0
+#define TSDB_PATTERN_NOMATCH 1
+#define TSDB_PATTERN_NOWILDCARDMATCH 2
+#define TSDB_PATTERN_STRING_DEFAULT_LEN 100
#define FLT_COMPAR_TOL_FACTOR 4
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 69b3741e13..b4cf2b6658 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -537,7 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
// wait for the refresh thread quit before destroying the cache object.
- while(atomic_load_8(&pCacheObj->deleting) != 0) {
+ // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 2 seconds.
+ for (int i = 0; i < 40&&atomic_load_8(&pCacheObj->deleting) != 0; i++) {
taosMsleep(50);
}
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 36480418c9..47cc751318 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -139,8 +139,8 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
- }
- return FLT_GREATER(p1, p2) ? 1: -1;
+ }
+ return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
@@ -164,8 +164,8 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
- }
- return FLT_GREATER(p1, p2) ? 1: -1;
+ }
+ return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
@@ -175,7 +175,7 @@ int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
-
+
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
@@ -224,33 +224,33 @@ int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
*/
int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) {
char c, c1;
-
+
int32_t i = 0;
int32_t j = 0;
-
+
while ((c = patterStr[i++]) != 0) {
if (c == pInfo->matchAll) { /* Match "*" */
-
+
while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) {
if (c == pInfo->matchOne && (j > size || str[j++] == 0)) {
// empty string, return not match
return TSDB_PATTERN_NOWILDCARDMATCH;
}
}
-
+
if (c == 0) {
return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */
}
-
+
char next[3] = {toupper(c), tolower(c), 0};
while (1) {
size_t n = strcspn(str, next);
str += n;
-
+
if (str[0] == 0 || (n >= size)) {
break;
}
-
+
int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
@@ -258,18 +258,19 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
-
+
c1 = str[j++];
-
+
if (j <= size) {
+ if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue;
}
}
-
+
return TSDB_PATTERN_NOMATCH;
}
-
+
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
@@ -277,13 +278,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
wchar_t c, c1;
wchar_t matchOne = L'_'; // "_"
wchar_t matchAll = L'%'; // "%"
-
+
int32_t i = 0;
int32_t j = 0;
-
+
while ((c = patterStr[i++]) != 0) {
if (c == matchAll) { /* Match "%" */
-
+
while ((c = patterStr[i++]) == matchAll || c == matchOne) {
if (c == matchOne && (j > size || str[j++] == 0)) {
return TSDB_PATTERN_NOWILDCARDMATCH;
@@ -292,33 +293,33 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
if (c == 0) {
return TSDB_PATTERN_MATCH;
}
-
+
wchar_t accept[3] = {towupper(c), towlower(c), 0};
while (1) {
size_t n = wcscspn(str, accept);
-
+
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
-
+
int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
}
}
-
+
return TSDB_PATTERN_NOWILDCARDMATCH;
}
-
+
c1 = str[j++];
-
+
if (j <= size) {
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
continue;
}
}
-
+
return TSDB_PATTERN_NOMATCH;
}
@@ -358,12 +359,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
- wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
+ wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
+
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@@ -410,10 +412,10 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
} else { /* normal relational comparFn */
comparFn = compareLenPrefixedStr;
}
-
+
break;
}
-
+
case TSDB_DATA_TYPE_NCHAR: {
if (optr == TSDB_RELATION_LIKE) {
comparFn = compareWStrPatternComp;
@@ -434,13 +436,13 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
comparFn = compareInt32Val;
break;
}
-
+
return comparFn;
}
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
__compar_fn_t comparFn = NULL;
-
+
switch (keyType) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BOOL:
@@ -484,7 +486,7 @@ __compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
}
-
+
return comparFn;
}
@@ -517,7 +519,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
default: { // todo refactor
tstr* t1 = (tstr*) f1;
tstr* t2 = (tstr*) f2;
-
+
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
} else {
diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp
index dfbe0f6716..df4c5af5e2 100644
--- a/src/util/tests/skiplistTest.cpp
+++ b/src/util/tests/skiplistTest.cpp
@@ -70,7 +70,7 @@ void doubleSkipListTest() {
}
void randKeyTest() {
- SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT),
+ SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
false, getkey);
int32_t size = 200000;
diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c
index cfadafebdd..e991bf02aa 100644
--- a/src/wal/src/walWrite.c
+++ b/src/wal/src/walWrite.c
@@ -540,7 +540,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version;
- //wInfo("writeFp: %ld", offset);
+ // wInfo("writeFp: %ld", offset);
if (0 != walSMemRowCheck(pHead)) {
wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);
diff --git a/tests/http/restful/http_create_db.c b/tests/http/restful/http_create_db.c
new file mode 100644
index 0000000000..0bc52fa6cc
--- /dev/null
+++ b/tests/http/restful/http_create_db.c
@@ -0,0 +1,429 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
+typedef enum
+{
+ uninited,
+ connecting,
+ connected,
+ datasent
+} conn_stat;
+
+
+typedef enum
+{
+ false,
+ true
+} bool;
+
+
+typedef unsigned short u16_t;
+typedef unsigned int u32_t;
+
+
+typedef struct
+{
+ int sockfd;
+ int index;
+ conn_stat state;
+ size_t nsent;
+ size_t nrecv;
+ size_t nlen;
+ bool error;
+ bool success;
+ struct sockaddr_in serv_addr;
+} socket_ctx;
+
+
+int set_nonblocking(int sockfd)
+{
+ int ret;
+
+ ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+ if (ret == -1) {
+ printf("failed to fcntl for %d\r\n", sockfd);
+ return ret;
+ }
+
+ return ret;
+}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+int del_event(int epfd, int sockfd)
+{
+ struct epoll_event evs_op;
+
+ evs_op.events = 0;
+ evs_op.data.ptr = NULL;
+
+ return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url = "/rest/sql";
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i);
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+            printf("failed to create socket at %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd at %d to epoll\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_create_tb.c b/tests/http/restful/http_create_tb.c
new file mode 100644
index 0000000000..91ffc54627
--- /dev/null
+++ b/tests/http/restful/http_create_tb.c
@@ -0,0 +1,433 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/epoll.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
/* Connection life-cycle states for one client socket. */
typedef enum
{
    uninited,
    connecting,  /* connect() returned EINPROGRESS; completion pending */
    connected,
    datasent     /* full HTTP request written; awaiting the response */
} conn_stat;


/* Minimal boolean type.  NOTE(review): this clashes with <stdbool.h>
 * (false/true become enum constants here), so that header must never be
 * included in this file. */
typedef enum
{
    false,
    true
} bool;


typedef unsigned short u16_t;  /* TCP port number */
typedef unsigned int u32_t;    /* epoll event mask */


/* Per-connection state shared between main()'s epoll loop and the helpers. */
typedef struct
{
    int sockfd;                    /* socket descriptor, -1 once closed */
    int index;                     /* position in the ctx[]/send_buf/recv_buf arrays */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once the HTTP status line is not 200 */
    bool success;                  /* set once the HTTP status line is 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
+
+
/* Put sockfd into non-blocking mode.
 * Returns 0 on success, -1 on failure. */
int set_nonblocking(int sockfd)
{
    int flags;

    /* BUG FIX: the original OR'ed O_NONBLOCK into the raw return value of
     * fcntl(F_GETFL) without checking it; on failure that value is -1 and
     * the subsequent F_SETFL would install a nonsense flag mask. */
    flags = fcntl(sockfd, F_GETFL, 0);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    return 0;
}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
/* Remove sockfd from the epoll instance.  The event argument is ignored by
 * kernels >= 2.6.9 but a zeroed struct is passed for older-kernel safety. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event unused = { 0 };

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+ snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_drop_db.c b/tests/http/restful/http_drop_db.c
new file mode 100644
index 0000000000..f82db901dd
--- /dev/null
+++ b/tests/http/restful/http_drop_db.c
@@ -0,0 +1,433 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/epoll.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
/* Connection life-cycle states for one client socket. */
typedef enum
{
    uninited,
    connecting,  /* connect() returned EINPROGRESS; completion pending */
    connected,
    datasent     /* full HTTP request written; awaiting the response */
} conn_stat;


/* Minimal boolean type.  NOTE(review): this clashes with <stdbool.h>
 * (false/true become enum constants here), so that header must never be
 * included in this file. */
typedef enum
{
    false,
    true
} bool;


typedef unsigned short u16_t;  /* TCP port number */
typedef unsigned int u32_t;    /* epoll event mask */


/* Per-connection state shared between main()'s epoll loop and the helpers. */
typedef struct
{
    int sockfd;                    /* socket descriptor, -1 once closed */
    int index;                     /* position in the ctx[]/send_buf/recv_buf arrays */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once the HTTP status line is not 200 */
    bool success;                  /* set once the HTTP status line is 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
+
+
/* Put sockfd into non-blocking mode.
 * Returns 0 on success, -1 on failure. */
int set_nonblocking(int sockfd)
{
    int flags;

    /* BUG FIX: the original OR'ed O_NONBLOCK into the raw return value of
     * fcntl(F_GETFL) without checking it; on failure that value is -1 and
     * the subsequent F_SETFL would install a nonsense flag mask. */
    flags = fcntl(sockfd, F_GETFL, 0);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    return 0;
}
+
+
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+ struct epoll_event evs_op;
+
+ evs_op.data.ptr = data;
+ evs_op.events = events;
+
+ return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
/* Remove sockfd from the epoll instance.  The event argument is ignored by
 * kernels >= 2.6.9 but a zeroed struct is passed for older-kernel safety. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event unused = { 0 };

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv;
+ int epfd;
+ u32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ u16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+ snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_insert_tb.c b/tests/http/restful/http_insert_tb.c
new file mode 100644
index 0000000000..f9590d856c
--- /dev/null
+++ b/tests/http/restful/http_insert_tb.c
@@ -0,0 +1,455 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <sys/time.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 4096
+#define REQ_CLI_COUNT 100
+
+
/* Connection life-cycle states for one client socket. */
typedef enum
{
    uninited,
    connecting,  /* connect() returned EINPROGRESS; completion pending */
    connected,
    datasent     /* full HTTP request written; awaiting the response */
} conn_stat;


/* Minimal boolean type.  NOTE(review): this clashes with <stdbool.h>
 * (false/true become enum constants here), so that header must never be
 * included in this file. */
typedef enum
{
    false,
    true
} bool;


/* Per-connection state shared between main()'s epoll loop and the helpers. */
typedef struct
{
    int sockfd;                    /* socket descriptor, -1 once closed */
    int index;                     /* position in the ctx[]/send_buf/recv_buf arrays */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once the HTTP status line is not 200 */
    bool success;                  /* set once the HTTP status line is 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
+
+
/* Put sockfd into non-blocking mode.
 * Returns 0 on success, -1 on failure. */
int set_nonblocking(int sockfd)
{
    int flags;

    /* BUG FIX: the original OR'ed O_NONBLOCK into the raw return value of
     * fcntl(F_GETFL) without checking it; on failure that value is -1 and
     * the subsequent F_SETFL would install a nonsense flag mask. */
    flags = fcntl(sockfd, F_GETFL, 0);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    if (fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }

    return 0;
}
+
+
+int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
+{
+ int ret;
+
+ if (ip == NULL || port == 0 || pctx == NULL) {
+ printf("invalid parameter\r\n");
+ return -1;
+ }
+
+ pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (pctx->sockfd == -1) {
+ printf("failed to create socket\r\n");
+ return -1;
+ }
+
+ bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+ pctx->serv_addr.sin_family = AF_INET;
+ pctx->serv_addr.sin_port = htons(port);
+
+ ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+ if (ret <= 0) {
+ printf("inet_pton error, ip: %s\r\n", ip);
+ return -1;
+ }
+
+ ret = set_nonblocking(pctx->sockfd);
+ if (ret == -1) {
+ printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+ return -1;
+ }
+
+ return pctx->sockfd;
+}
+
+
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+ int i;
+
+ if (pctx == NULL) {
+ return;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (pctx[i].sockfd > 0) {
+ close(pctx[i].sockfd);
+ pctx[i].sockfd = -1;
+ }
+ }
+}
+
+
+int proc_pending_error(socket_ctx *ctx)
+{
+ int ret;
+ int err;
+ socklen_t len;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ err = 0;
+ len = sizeof(int);
+
+ ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+ if (ret == -1) {
+ err = errno;
+ }
+
+ if (err) {
+ printf("failed to connect at index: %d\r\n", ctx->index);
+
+ close(ctx->sockfd);
+ ctx->sockfd = -1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
+{
+ char req_line[ITEM_MAX_LINE];
+ char req_host[ITEM_MAX_LINE];
+ char req_cont_type[ITEM_MAX_LINE];
+ char req_cont_len[ITEM_MAX_LINE];
+ const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+ if (ip == NULL || port == 0 ||
+ url == NULL || url[0] == '\0' ||
+ sql == NULL || sql[0] == '\0' ||
+ req_buf == NULL || len <= 0)
+ {
+ return;
+ }
+
+ snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+ snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+ snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+ snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
+
+ snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
/* Register sockfd with the epoll instance; data rides along in data.ptr. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
+
+
/* Change the event mask of an already-registered sockfd. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
+
+
/* Remove sockfd from the epoll instance.  The event argument is ignored by
 * kernels >= 2.6.9 but a zeroed struct is passed for older-kernel safety. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event unused = { 0 };

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
+
+
+int main()
+{
+ int i;
+ int ret, n, nsent, nrecv, offset;
+ int epfd;
+ uint32_t events;
+ char *str;
+ socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+ char *ip = "127.0.0.1";
+ char *url_prefix = "/rest/sql";
+ char url[ITEM_MAX_LINE];
+ uint16_t port = 6041;
+ struct epoll_event evs[REQ_CLI_COUNT];
+ struct timeval now;
+ int64_t start_time;
+ char sql[REQ_MAX_LINE];
+ char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+ char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+ int count;
+
+ signal(SIGPIPE, SIG_IGN);
+
+ gettimeofday(&now, NULL);
+ start_time = now.tv_sec * 1000000 + now.tv_usec;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ctx[i].sockfd = -1;
+ ctx[i].index = i;
+ ctx[i].state = uninited;
+ ctx[i].nsent = 0;
+ ctx[i].nrecv = 0;
+ ctx[i].error = false;
+ ctx[i].success = false;
+
+ memset(url, 0, ITEM_MAX_LINE);
+ memset(sql, 0, REQ_MAX_LINE);
+ memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+ memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+ snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+
+ offset = 0;
+
+ ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
+ if (ret <= 0) {
+ printf("failed to snprintf for sql(prefix), index: %d\r\n ", i);
+ goto failed;
+ }
+
+ offset += ret;
+
+ while (offset < REQ_MAX_LINE - 128) {
+ ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
+ if (ret <= 0) {
+ printf("failed to snprintf for sql(values), index: %d\r\n ", i);
+ goto failed;
+ }
+
+ offset += ret;
+ }
+
+ build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+ ctx[i].nlen = strlen(send_buf[i]);
+ }
+
+ epfd = epoll_create(REQ_CLI_COUNT);
+ if (epfd <= 0) {
+ printf("failed to create epoll\r\n");
+ goto failed;
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = create_socket(ip, port, &ctx[i]);
+ if (ret == -1) {
+ printf("failed to create socket, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ events = EPOLLET | EPOLLIN | EPOLLOUT;
+ ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+ if (ret == -1) {
+ printf("failed to add sockfd to epoll, index: %d\r\n", i);
+ goto failed;
+ }
+ }
+
+ count = 0;
+
+ for (i = 0; i < REQ_CLI_COUNT; i++) {
+ ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+ if (ret == -1) {
+ if (errno != EINPROGRESS) {
+ printf("connect error, index: %d\r\n", ctx[i].index);
+ (void) del_event(epfd, ctx[i].sockfd);
+ close(ctx[i].sockfd);
+ ctx[i].sockfd = -1;
+ } else {
+ ctx[i].state = connecting;
+ count++;
+ }
+
+ continue;
+ }
+
+ ctx[i].state = connected;
+ count++;
+ }
+
+ printf("clients: %d\r\n", count);
+
+ while (count > 0) {
+ n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
+ if (n == -1) {
+ if (errno != EINTR) {
+ printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+ break;
+ }
+ } else {
+ for (i = 0; i < n; i++) {
+ if (evs[i].events & EPOLLERR) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("event error, index: %d\r\n", pctx->index);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ } else if (evs[i].events & EPOLLIN) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
+ if (nrecv == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ } else if (nrecv == 0) {
+ printf("peer closed connection, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ break;
+ }
+
+ pctx->nrecv += nrecv;
+ if (pctx->nrecv > 12) {
+ if (pctx->error == false && pctx->success == false) {
+ str = recv_buf[pctx->index] + 9;
+ if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+ printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+ pctx->error = true;
+ } else {
+ printf("response ok, index: %d\r\n", pctx->index);
+ pctx->success = true;
+ }
+ }
+ }
+ }
+ } else if (evs[i].events & EPOLLOUT) {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ if (pctx->state == connecting) {
+ ret = proc_pending_error(pctx);
+ if (ret == 0) {
+ printf("client connected, index: %d\r\n", pctx->index);
+ pctx->state = connected;
+ } else {
+ printf("client connect failed, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+
+ continue;
+ }
+ }
+
+ for ( ;; ) {
+ nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+ if (nsent == -1) {
+ if (errno != EAGAIN && errno != EINTR) {
+ printf("failed to send, index: %d\r\n", pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+
+ break;
+ }
+
+ if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+ printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+ pctx->state = datasent;
+
+ events = EPOLLET | EPOLLIN;
+ (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+ break;
+ } else {
+ pctx->nsent += nsent;
+ }
+ }
+ } else {
+ pctx = (socket_ctx *) evs[i].data.ptr;
+ printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+ (void) del_event(epfd, pctx->sockfd);
+ close(pctx->sockfd);
+ pctx->sockfd = -1;
+ count--;
+ }
+ }
+ }
+ }
+
+failed:
+
+ if (epfd > 0) {
+ close(epfd);
+ }
+
+ close_sockets(ctx, REQ_CLI_COUNT);
+
+ return 0;
+}
diff --git a/tests/http/restful/http_query_tb.c b/tests/http/restful/http_query_tb.c
new file mode 100644
index 0000000000..e7ac0d4b01
--- /dev/null
+++ b/tests/http/restful/http_query_tb.c
@@ -0,0 +1,432 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 4096
+#define REQ_CLI_COUNT 100
+
+
+/* Connection life-cycle states for one client socket. */
+typedef enum
+{
+    uninited,
+    connecting,   /* non-blocking connect() returned EINPROGRESS */
+    connected,
+    datasent      /* full HTTP request has been written */
+} conn_stat;
+
+
+/* NOTE(review): hand-rolled bool; this clashes with <stdbool.h>/C23 `bool`
+ * if that header is ever included -- consider <stdbool.h> instead. */
+typedef enum
+{
+    false,
+    true
+} bool;
+
+
+/* Per-connection context shared between the epoll loop and the helpers. */
+typedef struct
+{
+    int sockfd;                   /* -1 when closed */
+    int index;                    /* position in the ctx array, used in logs */
+    conn_stat state;
+    size_t nsent;                 /* bytes of the request sent so far */
+    size_t nrecv;                 /* bytes of the response received so far */
+    size_t nlen;                  /* total length of the request to send */
+    bool error;                   /* response status line was not 200 */
+    bool success;                 /* response status line was 200 */
+    struct sockaddr_in serv_addr; /* filled in by create_socket() */
+} socket_ctx;
+
+
+/* Put sockfd into non-blocking mode.  Returns the fcntl() result: -1 on
+ * failure (logged), anything else on success. */
+int set_nonblocking(int sockfd)
+{
+    int flags = fcntl(sockfd, F_GETFL);
+    int rc = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
+
+    if (rc == -1) {
+        printf("failed to fcntl for %d\r\n", sockfd);
+    }
+
+    return rc;
+}
+
+
+/*
+ * Create a non-blocking TCP socket and fill pctx->serv_addr with ip:port.
+ * Returns the new fd on success, -1 on failure.  When a step after socket()
+ * fails, the fd stays in pctx->sockfd so close_sockets() can reap it later.
+ */
+int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
+{
+    if (ip == NULL || port == 0 || pctx == NULL) {
+        printf("invalid parameter\r\n");
+        return -1;
+    }
+
+    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+    if (pctx->sockfd == -1) {
+        printf("failed to create socket\r\n");
+        return -1;
+    }
+
+    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+    pctx->serv_addr.sin_family = AF_INET;
+    pctx->serv_addr.sin_port = htons(port);
+
+    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
+        printf("inet_pton error, ip: %s\r\n", ip);
+        return -1;
+    }
+
+    if (set_nonblocking(pctx->sockfd) == -1) {
+        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+        return -1;
+    }
+
+    return pctx->sockfd;
+}
+
+
+/* Close every still-open descriptor in the first cnt entries of pctx and
+ * mark it closed (-1).  NULL pctx is a no-op. */
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+    if (pctx == NULL) {
+        return;
+    }
+
+    for (int idx = 0; idx < cnt; idx++) {
+        if (pctx[idx].sockfd > 0) {
+            close(pctx[idx].sockfd);
+            pctx[idx].sockfd = -1;
+        }
+    }
+}
+
+
+/*
+ * Inspect SO_ERROR on a socket whose non-blocking connect() just completed.
+ * Returns 0 when the connect succeeded; otherwise logs, closes the fd and
+ * returns -1.  NULL ctx is treated as success.
+ */
+int proc_pending_error(socket_ctx *ctx)
+{
+    if (ctx == NULL) {
+        return 0;
+    }
+
+    int err = 0;
+    socklen_t len = sizeof(int);
+
+    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
+        err = errno;
+    }
+
+    if (err == 0) {
+        return 0;
+    }
+
+    printf("failed to connect at index: %d\r\n", ctx->index);
+    close(ctx->sockfd);
+    ctx->sockfd = -1;
+
+    return -1;
+}
+
+
+/*
+ * Assemble a minimal HTTP/1.1 POST request carrying `sql` as the body into
+ * req_buf (capacity len, NUL-terminated by snprintf).  Does nothing when any
+ * argument is missing or empty.
+ */
+void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
+{
+    char req_line[ITEM_MAX_LINE];
+    char req_host[ITEM_MAX_LINE];
+    char req_cont_type[ITEM_MAX_LINE];
+    char req_cont_len[ITEM_MAX_LINE];
+    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+    if (ip == NULL || port == 0 ||
+        url == NULL || url[0] == '\0' ||
+        sql == NULL || sql[0] == '\0' ||
+        req_buf == NULL || len <= 0)
+    {
+        return;
+    }
+
+    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+    /* strlen() yields size_t: %zu is the portable specifier -- the original
+     * %ld is undefined behavior where size_t != long (e.g. 64-bit Windows). */
+    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
+
+    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+/* Register sockfd with the epoll instance; `data` rides along in data.ptr.
+ * Returns epoll_ctl()'s result (0 on success, -1 on error). */
+int add_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+    struct epoll_event ev;
+
+    ev.events = events;
+    ev.data.ptr = data;
+
+    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
+}
+
+
+/* Change the event mask (and user pointer) of an fd already registered with
+ * epfd.  Returns epoll_ctl()'s result (0 on success, -1 on error). */
+int mod_event(int epfd, int sockfd, uint32_t events, void *data)
+{
+    struct epoll_event ev;
+
+    ev.events = events;
+    ev.data.ptr = data;
+
+    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
+}
+
+
+/* Remove sockfd from the epoll set.  A zeroed epoll_event is passed for
+ * pre-2.6.9 kernels, which reject a NULL event pointer on EPOLL_CTL_DEL. */
+int del_event(int epfd, int sockfd)
+{
+    struct epoll_event ev = { .events = 0, .data.ptr = NULL };
+
+    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
+}
+
+
+/*
+ * Stress driver: REQ_CLI_COUNT non-blocking HTTP clients multiplexed with
+ * epoll.  Each client POSTs one "select count(*)" REST query to its own
+ * database and verifies the HTTP status line is 200.  Always returns 0;
+ * individual client failures are only logged.
+ */
+int main()
+{
+    int i;
+    int ret, n, nsent, nrecv;
+    int epfd;
+    uint32_t events;
+    char *str;
+    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+    char *ip = "127.0.0.1";
+    char *url_prefix = "/rest/sql";
+    char url[ITEM_MAX_LINE];
+    uint16_t port = 6041;
+    struct epoll_event evs[REQ_CLI_COUNT];
+    char sql[REQ_MAX_LINE];
+    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+    int count;
+
+    /* a peer resetting the connection mid-send must not kill the process */
+    signal(SIGPIPE, SIG_IGN);
+
+    /* prepare one request per client: POST /rest/sql/db<i> "select count(*) from tb<i>" */
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ctx[i].sockfd = -1;
+        ctx[i].index = i;
+        ctx[i].state = uninited;
+        ctx[i].nsent = 0;
+        ctx[i].nrecv = 0;
+        ctx[i].error = false;
+        ctx[i].success = false;
+
+        memset(url, 0, ITEM_MAX_LINE);
+        memset(sql, 0, REQ_MAX_LINE);
+        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+        memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
+
+        snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i);
+
+        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+        ctx[i].nlen = strlen(send_buf[i]);
+    }
+
+    epfd = epoll_create(REQ_CLI_COUNT);
+    if (epfd <= 0) {
+        printf("failed to create epoll\r\n");
+        goto failed;
+    }
+
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ret = create_socket(ip, port, &ctx[i]);
+        if (ret == -1) {
+            printf("failed to create socket, index: %d\r\n", i);
+            goto failed;
+        }
+    }
+
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        events = EPOLLET | EPOLLIN | EPOLLOUT;
+        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+        if (ret == -1) {
+            printf("failed to add sockfd to epoll, index: %d\r\n", i);
+            goto failed;
+        }
+    }
+
+    count = 0;
+
+    /* kick off all the non-blocking connects; count tracks live clients */
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+        if (ret == -1) {
+            if (errno != EINPROGRESS) {
+                printf("connect error, index: %d\r\n", ctx[i].index);
+                (void) del_event(epfd, ctx[i].sockfd);
+                close(ctx[i].sockfd);
+                ctx[i].sockfd = -1;
+            } else {
+                ctx[i].state = connecting;
+                count++;
+            }
+
+            continue;
+        }
+
+        ctx[i].state = connected;
+        count++;
+    }
+
+    printf("clients: %d\r\n", count);
+
+    /* event loop: runs until every client has finished or failed */
+    while (count > 0) {
+        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
+        if (n == -1) {
+            if (errno != EINTR) {
+                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+                break;
+            }
+        } else {
+            for (i = 0; i < n; i++) {
+                if (evs[i].events & EPOLLERR) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    printf("event error, index: %d\r\n", pctx->index);
+                    /* close() also removes the fd from the epoll set */
+                    close(pctx->sockfd);
+                    pctx->sockfd = -1;
+                    count--;
+                } else if (evs[i].events & EPOLLIN) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    if (pctx->state == connecting) {
+                        ret = proc_pending_error(pctx);
+                        if (ret == 0) {
+                            printf("client connected, index: %d\r\n", pctx->index);
+                            pctx->state = connected;
+                        } else {
+                            printf("client connect failed, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+
+                            continue;
+                        }
+                    }
+
+                    for ( ;; ) {
+                        /* FIX: bound each read by the space left in recv_buf.
+                         * The original always passed RECV_MAX_LINE, overflowing
+                         * the buffer once pctx->nrecv > 0; one byte is reserved
+                         * so the buffer stays NUL-terminated for printf("%s"). */
+                        if (pctx->nrecv >= RECV_MAX_LINE - 1) {
+                            break;
+                        }
+
+                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
+                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
+                        if (nrecv == -1) {
+                            if (errno != EAGAIN && errno != EINTR) {
+                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+                                (void) del_event(epfd, pctx->sockfd);
+                                close(pctx->sockfd);
+                                pctx->sockfd = -1;
+                                count--;
+                            }
+
+                            break;
+                        } else if (nrecv == 0) {
+                            printf("peer closed connection, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+                            break;
+                        }
+
+                        pctx->nrecv += nrecv;
+                        /* 12 bytes covers "HTTP/1.1 xxx"; status code begins at offset 9 */
+                        if (pctx->nrecv > 12) {
+                            if (pctx->error == false && pctx->success == false) {
+                                str = recv_buf[pctx->index] + 9;
+                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+                                    pctx->error = true;
+                                } else {
+                                    printf("response ok, index: %d\r\n", pctx->index);
+                                    pctx->success = true;
+                                }
+                            }
+                        }
+                    }
+                } else if (evs[i].events & EPOLLOUT) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    if (pctx->state == connecting) {
+                        ret = proc_pending_error(pctx);
+                        if (ret == 0) {
+                            printf("client connected, index: %d\r\n", pctx->index);
+                            pctx->state = connected;
+                        } else {
+                            printf("client connect failed, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+
+                            continue;
+                        }
+                    }
+
+                    for ( ;; ) {
+                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+                        if (nsent == -1) {
+                            if (errno != EAGAIN && errno != EINTR) {
+                                printf("failed to send, index: %d\r\n", pctx->index);
+                                (void) del_event(epfd, pctx->sockfd);
+                                close(pctx->sockfd);
+                                pctx->sockfd = -1;
+                                count--;
+                            }
+
+                            break;
+                        }
+
+                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+                            pctx->state = datasent;
+
+                            /* request fully sent: from now on only wait for the response */
+                            events = EPOLLET | EPOLLIN;
+                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+                            break;
+                        } else {
+                            pctx->nsent += nsent;
+                        }
+                    }
+                } else {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+                    (void) del_event(epfd, pctx->sockfd);
+                    close(pctx->sockfd);
+                    pctx->sockfd = -1;
+                    count--;
+                }
+            }
+        }
+    }
+
+failed:
+
+    if (epfd > 0) {
+        close(epfd);
+    }
+
+    close_sockets(ctx, REQ_CLI_COUNT);
+
+    return 0;
+}
diff --git a/tests/http/restful/http_use_db.c b/tests/http/restful/http_use_db.c
new file mode 100644
index 0000000000..3b27022470
--- /dev/null
+++ b/tests/http/restful/http_use_db.c
@@ -0,0 +1,430 @@
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+
+#define RECV_MAX_LINE 2048
+#define ITEM_MAX_LINE 128
+#define REQ_MAX_LINE 2048
+#define REQ_CLI_COUNT 100
+
+
+/* Connection life-cycle states for one client socket. */
+typedef enum
+{
+    uninited,
+    connecting,   /* non-blocking connect() returned EINPROGRESS */
+    connected,
+    datasent      /* full HTTP request has been written */
+} conn_stat;
+
+
+/* NOTE(review): hand-rolled bool; this clashes with <stdbool.h>/C23 `bool`
+ * if that header is ever included -- consider <stdbool.h> instead. */
+typedef enum
+{
+    false,
+    true
+} bool;
+
+
+/* Local integer shorthands used in place of <stdint.h>. */
+typedef unsigned short u16_t;
+typedef unsigned int u32_t;
+
+
+/* Per-connection context shared between the epoll loop and the helpers. */
+typedef struct
+{
+    int sockfd;                   /* -1 when closed */
+    int index;                    /* position in the ctx array, used in logs */
+    conn_stat state;
+    size_t nsent;                 /* bytes of the request sent so far */
+    size_t nrecv;                 /* bytes of the response received so far */
+    size_t nlen;                  /* total length of the request to send */
+    bool error;                   /* response status line was not 200 */
+    bool success;                 /* response status line was 200 */
+    struct sockaddr_in serv_addr; /* filled in by create_socket() */
+} socket_ctx;
+
+
+/* Put sockfd into non-blocking mode; returns the fcntl() result (-1 on
+ * failure, which is also logged). */
+int set_nonblocking(int sockfd)
+{
+    int ret;
+
+    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+    if (ret == -1) {
+        printf("failed to fcntl for %d\r\n", sockfd);
+        return ret;
+    }
+
+    return ret;
+}
+
+
+/*
+ * Create a non-blocking TCP socket and fill pctx->serv_addr with ip:port.
+ * Returns the new fd on success, -1 on failure.  On failure after socket()
+ * succeeded the fd stays in pctx->sockfd for close_sockets() to reap.
+ */
+int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
+{
+    int ret;
+
+    if (ip == NULL || port == 0 || pctx == NULL) {
+        printf("invalid parameter\r\n");
+        return -1;
+    }
+
+    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
+    if (pctx->sockfd == -1) {
+        printf("failed to create socket\r\n");
+        return -1;
+    }
+
+    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
+
+    pctx->serv_addr.sin_family = AF_INET;
+    pctx->serv_addr.sin_port = htons(port);
+
+    /* inet_pton returns 0 for a malformed address, -1 on error */
+    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
+    if (ret <= 0) {
+        printf("inet_pton error, ip: %s\r\n", ip);
+        return -1;
+    }
+
+    ret = set_nonblocking(pctx->sockfd);
+    if (ret == -1) {
+        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
+        return -1;
+    }
+
+    return pctx->sockfd;
+}
+
+
+/* Close every still-open descriptor in the first cnt entries of pctx and
+ * mark it closed (-1).  NULL pctx is a no-op. */
+void close_sockets(socket_ctx *pctx, int cnt)
+{
+    int i;
+
+    if (pctx == NULL) {
+        return;
+    }
+
+    for (i = 0; i < cnt; i++) {
+        if (pctx[i].sockfd > 0) {
+            close(pctx[i].sockfd);
+            pctx[i].sockfd = -1;
+        }
+    }
+}
+
+
+/*
+ * Inspect SO_ERROR on a socket whose non-blocking connect() just completed.
+ * Returns 0 when the connect succeeded; otherwise logs, closes the fd and
+ * returns -1.  NULL ctx is treated as success.
+ */
+int proc_pending_error(socket_ctx *ctx)
+{
+    int ret;
+    int err;
+    socklen_t len;
+
+    if (ctx == NULL) {
+        return 0;
+    }
+
+    err = 0;
+    len = sizeof(int);
+
+    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
+    if (ret == -1) {
+        /* getsockopt itself failed; treat its errno as the connect error */
+        err = errno;
+    }
+
+    if (err) {
+        printf("failed to connect at index: %d\r\n", ctx->index);
+
+        close(ctx->sockfd);
+        ctx->sockfd = -1;
+
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/*
+ * Assemble a minimal HTTP/1.1 POST request carrying `sql` as the body into
+ * req_buf (capacity len, NUL-terminated by snprintf).  Does nothing when any
+ * argument is missing or empty.
+ */
+void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
+{
+    char req_line[ITEM_MAX_LINE];
+    char req_host[ITEM_MAX_LINE];
+    char req_cont_type[ITEM_MAX_LINE];
+    char req_cont_len[ITEM_MAX_LINE];
+    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+
+    if (ip == NULL || port == 0 ||
+        url == NULL || url[0] == '\0' ||
+        sql == NULL || sql[0] == '\0' ||
+        req_buf == NULL || len <= 0)
+    {
+        return;
+    }
+
+    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
+    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
+    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
+    /* strlen() yields size_t: %zu is the portable specifier -- the original
+     * %ld is undefined behavior where size_t != long (e.g. 64-bit Windows). */
+    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
+
+    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
+}
+
+
+/* Register sockfd with the epoll instance; `data` rides along in data.ptr.
+ * Returns epoll_ctl()'s result (0 on success, -1 on error). */
+int add_event(int epfd, int sockfd, u32_t events, void *data)
+{
+    struct epoll_event evs_op;
+
+    evs_op.data.ptr = data;
+    evs_op.events = events;
+
+    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
+}
+
+
+/* Change the event mask (and user pointer) of an fd already registered with
+ * epfd.  Returns epoll_ctl()'s result (0 on success, -1 on error). */
+int mod_event(int epfd, int sockfd, u32_t events, void *data)
+{
+    struct epoll_event evs_op;
+
+    evs_op.data.ptr = data;
+    evs_op.events = events;
+
+    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
+}
+
+
+/* Remove sockfd from the epoll set.  A zeroed epoll_event is passed because
+ * pre-2.6.9 kernels reject a NULL event pointer on EPOLL_CTL_DEL. */
+int del_event(int epfd, int sockfd)
+{
+    struct epoll_event evs_op;
+
+    evs_op.events = 0;
+    evs_op.data.ptr = NULL;
+
+    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
+}
+
+
+/*
+ * Stress driver: REQ_CLI_COUNT non-blocking HTTP clients multiplexed with
+ * epoll.  Each client POSTs one "use db<i>" REST statement and verifies the
+ * HTTP status line is 200.  Always returns 0; individual client failures are
+ * only logged.
+ */
+int main()
+{
+    int i;
+    int ret, n, nsent, nrecv;
+    int epfd;
+    u32_t events;
+    char *str;
+    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
+    char *ip = "127.0.0.1";
+    char *url = "/rest/sql";
+    u16_t port = 6041;
+    struct epoll_event evs[REQ_CLI_COUNT];
+    char sql[REQ_MAX_LINE];
+    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
+    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
+    int count;
+
+    /* a peer resetting the connection mid-send must not kill the process */
+    signal(SIGPIPE, SIG_IGN);
+
+    /* prepare one request per client: POST /rest/sql "use db<i>" */
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ctx[i].sockfd = -1;
+        ctx[i].index = i;
+        ctx[i].state = uninited;
+        ctx[i].nsent = 0;
+        ctx[i].nrecv = 0;
+        ctx[i].error = false;
+        ctx[i].success = false;
+
+        memset(sql, 0, REQ_MAX_LINE);
+        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+        memset(recv_buf[i], 0, RECV_MAX_LINE);
+
+        snprintf(sql, REQ_MAX_LINE, "use db%d", i);
+
+        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
+
+        ctx[i].nlen = strlen(send_buf[i]);
+    }
+
+    epfd = epoll_create(REQ_CLI_COUNT);
+    if (epfd <= 0) {
+        printf("failed to create epoll\r\n");
+        goto failed;
+    }
+
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ret = create_socket(ip, port, &ctx[i]);
+        if (ret == -1) {
+            printf("failed to create socket, index: %d\r\n", i);
+            goto failed;
+        }
+    }
+
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        events = EPOLLET | EPOLLIN | EPOLLOUT;
+        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
+        if (ret == -1) {
+            printf("failed to add sockfd to epoll, index: %d\r\n", i);
+            goto failed;
+        }
+    }
+
+    count = 0;
+
+    /* kick off all the non-blocking connects; count tracks live clients */
+    for (i = 0; i < REQ_CLI_COUNT; i++) {
+        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
+        if (ret == -1) {
+            if (errno != EINPROGRESS) {
+                printf("connect error, index: %d\r\n", ctx[i].index);
+                (void) del_event(epfd, ctx[i].sockfd);
+                close(ctx[i].sockfd);
+                ctx[i].sockfd = -1;
+            } else {
+                ctx[i].state = connecting;
+                count++;
+            }
+
+            continue;
+        }
+
+        ctx[i].state = connected;
+        count++;
+    }
+
+    printf("clients: %d\r\n", count);
+
+    /* event loop: runs until every client has finished or failed */
+    while (count > 0) {
+        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
+        if (n == -1) {
+            if (errno != EINTR) {
+                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
+                break;
+            }
+        } else {
+            for (i = 0; i < n; i++) {
+                if (evs[i].events & EPOLLERR) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    printf("event error, index: %d\r\n", pctx->index);
+                    /* close() also removes the fd from the epoll set */
+                    close(pctx->sockfd);
+                    pctx->sockfd = -1;
+                    count--;
+                } else if (evs[i].events & EPOLLIN) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    if (pctx->state == connecting) {
+                        ret = proc_pending_error(pctx);
+                        if (ret == 0) {
+                            printf("client connected, index: %d\r\n", pctx->index);
+                            pctx->state = connected;
+                        } else {
+                            printf("client connect failed, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+
+                            continue;
+                        }
+                    }
+
+                    for ( ;; ) {
+                        /* FIX: bound each read by the space left in recv_buf.
+                         * The original always passed RECV_MAX_LINE, overflowing
+                         * the buffer once pctx->nrecv > 0; one byte is reserved
+                         * so the buffer stays NUL-terminated for printf("%s"). */
+                        if (pctx->nrecv >= RECV_MAX_LINE - 1) {
+                            break;
+                        }
+
+                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
+                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
+                        if (nrecv == -1) {
+                            if (errno != EAGAIN && errno != EINTR) {
+                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
+                                (void) del_event(epfd, pctx->sockfd);
+                                close(pctx->sockfd);
+                                pctx->sockfd = -1;
+                                count--;
+                            }
+
+                            break;
+                        } else if (nrecv == 0) {
+                            printf("peer closed connection, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+                            break;
+                        }
+
+                        pctx->nrecv += nrecv;
+                        /* 12 bytes covers "HTTP/1.1 xxx"; status code begins at offset 9 */
+                        if (pctx->nrecv > 12) {
+                            if (pctx->error == false && pctx->success == false) {
+                                str = recv_buf[pctx->index] + 9;
+                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
+                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
+                                    pctx->error = true;
+                                } else {
+                                    printf("response ok, index: %d\r\n", pctx->index);
+                                    pctx->success = true;
+                                }
+                            }
+                        }
+                    }
+                } else if (evs[i].events & EPOLLOUT) {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    if (pctx->state == connecting) {
+                        ret = proc_pending_error(pctx);
+                        if (ret == 0) {
+                            printf("client connected, index: %d\r\n", pctx->index);
+                            pctx->state = connected;
+                        } else {
+                            printf("client connect failed, index: %d\r\n", pctx->index);
+                            (void) del_event(epfd, pctx->sockfd);
+                            close(pctx->sockfd);
+                            pctx->sockfd = -1;
+                            count--;
+
+                            continue;
+                        }
+                    }
+
+                    for ( ;; ) {
+                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
+                        if (nsent == -1) {
+                            if (errno != EAGAIN && errno != EINTR) {
+                                printf("failed to send, index: %d\r\n", pctx->index);
+                                (void) del_event(epfd, pctx->sockfd);
+                                close(pctx->sockfd);
+                                pctx->sockfd = -1;
+                                count--;
+                            }
+
+                            break;
+                        }
+
+                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
+                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
+
+                            pctx->state = datasent;
+
+                            /* request fully sent: from now on only wait for the response */
+                            events = EPOLLET | EPOLLIN;
+                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
+
+                            break;
+                        } else {
+                            pctx->nsent += nsent;
+                        }
+                    }
+                } else {
+                    pctx = (socket_ctx *) evs[i].data.ptr;
+                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
+                    (void) del_event(epfd, pctx->sockfd);
+                    close(pctx->sockfd);
+                    pctx->sockfd = -1;
+                    count--;
+                }
+            }
+        }
+    }
+
+failed:
+
+    if (epfd > 0) {
+        close(epfd);
+    }
+
+    close_sockets(ctx, REQ_CLI_COUNT);
+
+    return 0;
+}
diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py
index a5acb7a73e..33e0aec727 100644
--- a/tests/pytest/alter/alter_table.py
+++ b/tests/pytest/alter/alter_table.py
@@ -102,6 +102,20 @@ class TDTestCase:
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
+ def alter_table_255_times(self): # add case for TD-6207
+ # Regression test for TD-6207: repeatedly add columns to super table `st`,
+ # exercising insert/query after the schema changes, then make sure the
+ # schema survives a dnode restart.  (NOTE(review): original indentation was
+ # lost in extraction; the loop extent has deliberately not been reformatted.)
+ for i in range(255):
+ tdLog.info("alter table st add column cb%d int"%i)
+ tdSql.execute("alter table st add column cb%d int"%i)
+ tdSql.execute("insert into t0 (ts,c1) values(now,1)")
+ tdSql.execute("reset query cache")
+ tdSql.query("select * from st")
+ tdSql.execute("create table mt(ts timestamp, i int)")
+ tdSql.execute("insert into mt values(now,11)")
+ tdSql.query("select * from mt")
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+ tdSql.query("describe db.st")
+
def run(self):
# Setup params
db = "db"
@@ -131,12 +145,14 @@ class TDTestCase:
tdSql.checkData(0, i, self.rowNum * (size - i))
- tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)")
- tdSql.execute("create table t0 using st tags(null)")
+ tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)")
+ tdSql.execute("create table t0 using st tags(null,1,2.3)")
tdSql.execute("alter table t0 set tag t1=2.1")
tdSql.query("show tables")
tdSql.checkRows(2)
+ self.alter_table_255_times()
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py
index 333c2a0a57..7af38c3b56 100644
--- a/tests/pytest/concurrent_inquiry.py
+++ b/tests/pytest/concurrent_inquiry.py
@@ -175,12 +175,62 @@ class ConcurrentInquiry:
def con_group(self,tlist,col_list,tag_list):
rand_tag = random.randint(0,5)
rand_col = random.randint(0,1)
- return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
-
+ if len(tag_list):
+ return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
+ else:
+ return 'group by '+','.join(random.sample(col_list,rand_col))
+
def con_order(self,tlist,col_list,tag_list):
return 'order by '+random.choice(tlist)
- def gen_query_sql(self): #生成查询语句
+ def gen_subquery_sql(self):
+ # Build a random query whose FROM clause is itself a generated query:
+ # gen_query_sql(1) aliases each selected column, and the outer query
+ # refers to them as taosd<i>.  Returns 0 when the inner query selected
+ # no columns, otherwise the SQL string.  (NOTE(review): original
+ # indentation was lost in extraction and is not reformatted here.)
+ subsql ,col_num = self.gen_query_sql(1)
+ if col_num == 0:
+ return 0
+ col_list=[]
+ tag_list=[]
+ for i in range(col_num):
+ col_list.append("taosd%d"%i)
+
+ tlist=col_list+['abc'] # add a nonexistent column 'abc' -- does it trigger a new bug?
+ con_rand=random.randint(0,len(condition_list))
+ func_rand=random.randint(0,len(func_list))
+ col_rand=random.randint(0,len(col_list))
+ t_rand=random.randint(0,len(tlist))
+ sql='select ' #select
+ random.shuffle(col_list)
+ random.shuffle(func_list)
+ sel_col_list=[]
+ col_rand=random.randint(0,len(col_list))
+ loop = 0
+ for i,j in zip(col_list[0:col_rand],func_list): # pick the function applied to each queried col
+ alias = ' as '+ 'sub%d ' % loop
+ loop += 1
+ pick_func = ''
+ if j == 'leastsquares':
+ pick_func=j+'('+i+',1,1)'
+ elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
+ pick_func=j+'('+i+',1)'
+ else:
+ pick_func=j+'('+i+')'
+ if bool(random.getrandbits(1)) :
+ pick_func+=alias
+ sel_col_list.append(pick_func)
+ if col_rand == 0:
+ sql = sql + '*'
+ else:
+ sql=sql+','.join(sel_col_list) #select col & func
+ sql = sql + ' from ('+ subsql +') '
+ con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill]
+ sel_con=random.sample(con_func,random.randint(0,len(con_func)))
+ sel_con_list=[]
+ for i in sel_con:
+ sel_con_list.append(i(tlist,col_list,tag_list)) # collect the chosen condition clauses
+ sql+=' '.join(sel_con_list) # condition
+ #print(sql)
+ return sql
+
+ def gen_query_sql(self,subquery=0): #生成查询语句
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
tbname=''
col_list=[]
@@ -218,10 +268,10 @@ class ConcurrentInquiry:
pick_func=j+'('+i+',1)'
else:
pick_func=j+'('+i+')'
- if bool(random.getrandbits(1)):
+ if bool(random.getrandbits(1)) | subquery :
pick_func+=alias
sel_col_list.append(pick_func)
- if col_rand == 0:
+ if col_rand == 0 & subquery :
sql = sql + '*'
else:
sql=sql+','.join(sel_col_list) #select col & func
@@ -238,7 +288,7 @@ class ConcurrentInquiry:
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
sql+=' '.join(sel_con_list) # condition
#print(sql)
- return sql
+ return (sql,loop)
def gen_query_join(self): #生成join查询语句
tbname = []
@@ -429,9 +479,12 @@ class ConcurrentInquiry:
try:
if self.random_pick():
- sql=self.gen_query_sql()
+ if self.random_pick():
+ sql,temp=self.gen_query_sql()
+ else:
+ sql = self.gen_subquery_sql()
else:
- sql=self.gen_query_join()
+ sql = self.gen_query_join()
print("sql is ",sql)
fo.write(sql+'\n')
start = time.time()
@@ -496,9 +549,12 @@ class ConcurrentInquiry:
while loop:
try:
if self.random_pick():
- sql=self.gen_query_sql()
+ if self.random_pick():
+ sql,temp=self.gen_query_sql()
+ else:
+ sql = self.gen_subquery_sql()
else:
- sql=self.gen_query_join()
+ sql = self.gen_query_join()
print("sql is ",sql)
fo.write(sql+'\n')
start = time.time()
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 1d7276b898..42cd5d8055 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -80,6 +80,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
python3 ./test.py -f tag_lite/timestamp.py
+python3 ./test.py -f tag_lite/TestModifyTag.py
#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 test.py -f dbmgmt/nanoSecondCheck.py
@@ -381,7 +382,9 @@ python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py
-python3 ./test.py -f alter/alterColMultiTimes.py
+python3 ./test.py -f alter/alterColMultiTimes.py
+python3 ./test.py -f query/queryWildcardLength.py
+python3 ./test.py -f query/queryTbnameUpperLower.py
#======================p4-end===============
diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py
index 810c90279c..41215f15eb 100644
--- a/tests/pytest/functions/function_interp.py
+++ b/tests/pytest/functions/function_interp.py
@@ -26,18 +26,70 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
-
+
def run(self):
tdSql.prepare()
- tdSql.execute("create table t(ts timestamp, k int)")
- tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
-
- tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
- tdSql.checkRows(1)
- tdSql.checkData(0, 1, 12)
+ tdSql.execute("create table ap1 (ts timestamp, pav float)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)")
+ tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)")
+
+ tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)")
+ tdSql.checkRows(0)
+ tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)")
+ tdSql.checkRows(0)
+ tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)")
+ tdSql.checkRows(0)
+ tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
+ tdSql.checkRows(6)
+ tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0,1,2.90799)
+ tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)")
+ tdSql.checkRows(7)
+ tdSql.checkData(1,1,1.47885)
+ tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
+ tdSql.checkRows(7)
+
+ # check desc order
+ tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc")
+ tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc")
+ tdSql.checkRows(0)
+ tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc")
+ tdSql.checkRows(0)
+ tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
+ tdSql.checkRows(6)
+ tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc")
+ tdSql.checkRows(6)
+ tdSql.checkData(0,1,4.60900)
+ tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc")
+ tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
+ tdSql.checkRows(7)
+
+ # check exception
+ tdSql.error("select interp(*) from ap1")
+ tdSql.error("select interp(*) from ap1 FILL(NEXT)")
+ tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)")
+ tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)")
+ tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)")
- tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py
index b7480fdbd5..f320db43af 100644
--- a/tests/pytest/functions/queryTestCases.py
+++ b/tests/pytest/functions/queryTestCases.py
@@ -13,6 +13,8 @@
import sys
import subprocess
+import random
+import math
from util.log import *
from util.cases import *
@@ -56,7 +58,7 @@ class TDTestCase:
def td3690(self):
tdLog.printNoPrefix("==========TD-3690==========")
tdSql.query("show variables")
- tdSql.checkData(51, 1, 864000)
+ tdSql.checkData(53, 1, 864000)
def td4082(self):
tdLog.printNoPrefix("==========TD-4082==========")
@@ -106,6 +108,9 @@ class TDTestCase:
tdSql.execute("drop database if exists db1")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
@@ -122,6 +127,14 @@ class TDTestCase:
# p1 不进入指定数据库
tdSql.query("show create database db")
tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
tdSql.error("show create database ")
tdSql.error("show create databases db ")
tdSql.error("show create database db.stb1")
@@ -255,7 +268,7 @@ class TDTestCase:
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.query("show variables")
- tdSql.checkData(36, 1, 3650)
+ tdSql.checkData(38, 1, 3650)
tdSql.query("show databases")
tdSql.checkData(0,7,"3650,3650,3650")
@@ -283,7 +296,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkData(0, 7, "3650,3650,3650")
tdSql.query("show variables")
- tdSql.checkData(36, 1, 3650)
+ tdSql.checkData(38, 1, 3650)
tdSql.execute("alter database db1 keep 365")
tdSql.execute("drop database if exists db1")
@@ -340,17 +353,552 @@ class TDTestCase:
pass
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ for j in range(100):
+ tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+        for i in range(3):
+            tdSql.query("show vgroups")
+            if tdSql.getData(0, 6) == 1:
+                tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+                break
+            if i == 2:
+                tdLog.exit("compacting not occurred")
+            time.sleep(0.5)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(1000000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # 插入小范围内的随机数
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # 关闭服务并获取未开启压缩情况下的数据容量
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # 开启有损压缩,参数float,并启动服务插入数据
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为float情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数double,并启动服务
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为double情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数 float&&double ,并启动服务
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=2000000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+        #========== TD-5810 support distinct multi-data-column ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct c1, c2 from stb1 order by ts")
+ tdSql.checkRows(tbnum*3+1)
+ tdSql.query("select distinct c1, c2 from t1 order by ts")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct multi-tags-column ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+ ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
def run(self):
# master branch
# self.td3690()
# self.td4082()
# self.td4288()
- self.td4724()
+ # self.td4724()
+ self.td5798()
+ # self.td5935()
# develop branch
# self.td4097()
-
+ # self.td4889()
+ # self.td5168()
+ # self.td5433()
def stop(self):
tdSql.close()
diff --git a/tests/pytest/insert/insertFromCSVPerformance.py b/tests/pytest/insert/insertFromCSVPerformance.py
index f3b9c2734d..487497631a 100644
--- a/tests/pytest/insert/insertFromCSVPerformance.py
+++ b/tests/pytest/insert/insertFromCSVPerformance.py
@@ -28,7 +28,7 @@ class insertFromCSVPerformace:
self.tbName = tbName
self.branchName = branchName
self.type = buildType
- self.ts = 1500074556514
+ self.ts = 1500000000000
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@@ -46,13 +46,20 @@ class insertFromCSVPerformace:
config = self.config)
def writeCSV(self):
- with open('test3.csv','w', encoding='utf-8', newline='') as csvFile:
+ tsset = set()
+ rows = 0
+ with open('test4.csv','w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile, dialect='excel')
- for i in range(1000000):
- newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000)
- d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
- dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
- writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
+ while True:
+ newTimestamp = self.ts + random.randint(1, 10) * 10000000000 + random.randint(1, 10) * 1000000000 + random.randint(1, 10) * 100000000 + random.randint(1, 10) * 10000000 + random.randint(1, 10) * 1000000 + random.randint(1, 10) * 100000 + random.randint(1, 10) * 10000 + random.randint(1, 10) * 1000 + random.randint(1, 10) * 100 + random.randint(1, 10) * 10 + random.randint(1, 10)
+ if newTimestamp not in tsset:
+ tsset.add(newTimestamp)
+ d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
+ dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
+ writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
+ rows += 1
+ if rows == 2000000:
+ break
def removCSVHeader(self):
data = pd.read_csv("ordered.csv")
@@ -71,7 +78,9 @@ class insertFromCSVPerformace:
cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
cursor.execute("insert into t1 file 'outoforder.csv'")
- totalTime += time.time() - startTime
+ totalTime += time.time() - startTime
+ time.sleep(1)
+
out_of_order_time = (float) (totalTime / 10)
print("Out of Order - Insert time: %f" % out_of_order_time)
@@ -81,7 +90,8 @@ class insertFromCSVPerformace:
cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
cursor.execute("insert into t2 file 'ordered.csv'")
- totalTime += time.time() - startTime
+ totalTime += time.time() - startTime
+ time.sleep(1)
in_order_time = (float) (totalTime / 10)
print("In order - Insert time: %f" % in_order_time)
diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
index 692b5b7d36..aa16e8cc76 100644
--- a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
+++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py
@@ -29,7 +29,6 @@ class TDTestCase:
self.tables = 10
self.rowsPerTable = 100
-
def run(self):
# tdSql.execute("drop database db ")
tdSql.prepare()
diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py
index ac78c0518f..e5c468600b 100644
--- a/tests/pytest/query/queryError.py
+++ b/tests/pytest/query/queryError.py
@@ -65,6 +65,10 @@ class TDTestCase:
# TD-2208
tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001")
+ # TD-6006
+ tdSql.error("select * from dev_001 where 'name' is not null")
+ tdSql.error("select * from dev_001 where \"name\" = 'first'")
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py
index 81103252d8..29e5cb19b7 100644
--- a/tests/pytest/query/queryPerformance.py
+++ b/tests/pytest/query/queryPerformance.py
@@ -17,6 +17,7 @@ import os
import taos
import time
import argparse
+import json
class taosdemoQueryPerformace:
@@ -48,7 +49,7 @@ class taosdemoQueryPerformace:
cursor2 = self.conn2.cursor()
cursor2.execute("create database if not exists %s" % self.dbName)
cursor2.execute("use %s" % self.dbName)
- cursor2.execute("create table if not exists %s(ts timestamp, query_time float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
+ cursor2.execute("create table if not exists %s(ts timestamp, query_time_avg float, query_time_max float, query_time_min float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
sql = "select count(*) from test.meters"
tableid = 1
@@ -74,7 +75,7 @@ class taosdemoQueryPerformace:
tableid = 6
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
- sql = "select * from meters"
+ sql = "select * from meters limit 10000"
tableid = 7
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
@@ -87,37 +88,96 @@ class taosdemoQueryPerformace:
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
cursor2.close()
+
+ def generateQueryJson(self):
+
+ sqls = []
+ cursor2 = self.conn2.cursor()
+ cursor2.execute("select query_id, query_sql from %s.%s" % (self.dbName, self.stbName))
+ i = 0
+ for data in cursor2:
+ sql = {
+ "sql": data[1],
+ "result_mode": "onlyformat",
+ "result_file": "./query_sql_res%d.txt" % i
+ }
+ sqls.append(sql)
+ i += 1
+
+ query_data = {
+ "filetype": "query",
+ "cfgdir": "/etc/perf",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "databases": "test",
+ "specified_table_query": {
+ "query_times": 100,
+ "concurrent": 1,
+ "sqls": sqls
+ }
+ }
+
+ query_json_file = f"/tmp/query.json"
+
+ with open(query_json_file, 'w') as f:
+ json.dump(query_data, f)
+ return query_json_file
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosdemo" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def getCMDOutput(self, cmd):
+ cmd = os.popen(cmd)
+ output = cmd.read()
+ cmd.close()
+ return output
def query(self):
- cursor = self.conn.cursor()
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ print("taosdemo not found!")
+ sys.exit(1)
+
+ binPath = buildPath + "/build/bin/"
+ os.system(
+ "%sperfMonitor -f %s > query_res.txt" %
+ (binPath, self.generateQueryJson()))
+
+ cursor = self.conn2.cursor()
print("==================== query performance ====================")
-
cursor.execute("use %s" % self.dbName)
- cursor.execute("select tbname, query_id, query_sql from %s" % self.stbName)
+ cursor.execute("select tbname, query_sql from %s" % self.stbName)
+ i = 0
for data in cursor:
table_name = data[0]
- query_id = data[1]
- sql = data[2]
-
- totalTime = 0
- cursor2 = self.conn.cursor()
- cursor2.execute("use test")
- for i in range(100):
- if(self.clearCache == True):
- # root permission is required
- os.system("echo 3 > /proc/sys/vm/drop_caches")
-
- startTime = time.time()
- cursor2.execute(sql)
- totalTime += time.time() - startTime
- cursor2.close()
- print("query time for: %s %f seconds" % (sql, totalTime / 100))
-
- cursor3 = self.conn2.cursor()
- cursor3.execute("insert into %s.%s values(now, %f, '%s', '%s', '%s')" % (self.dbName, table_name, totalTime / 100, self.commitID, self.branch, self.type))
+ sql = data[1]
- cursor3.close()
+ self.avgDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $2}'" % (i + 1))
+ self.maxDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $5}'" % (i + 1))
+ self.minDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $8}'" % (i + 1))
+ i += 1
+
+ print("query time for: %s %f seconds" % (sql, float(self.avgDelay)))
+ c = self.conn2.cursor()
+ c.execute("insert into %s.%s values(now, %f, %f, %f, '%s', '%s', '%s')" % (self.dbName, table_name, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.commitID, self.branch, self.type))
+
+ c.close()
cursor.close()
if __name__ == '__main__':
@@ -174,4 +234,4 @@ if __name__ == '__main__':
args = parser.parse_args()
perftest = taosdemoQueryPerformace(args.remove_cache, args.commit_id, args.database_name, args.stable_name, args.table_perfix, args.git_branch, args.build_type)
perftest.createPerfTables()
- perftest.query()
+ perftest.query()
\ No newline at end of file
diff --git a/tests/pytest/query/queryTbnameUpperLower.py b/tests/pytest/query/queryTbnameUpperLower.py
new file mode 100644
index 0000000000..bd4e85c5ca
--- /dev/null
+++ b/tests/pytest/query/queryTbnameUpperLower.py
@@ -0,0 +1,78 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.common import tdCom
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def checkStbWhereIn(self):
+ '''
+ where in ---> upper lower mixed
+ '''
+ tdCom.cleanTb()
+ table_name = tdCom.getLongName(8, "letters_mixed")
+ table_name_sub = f'{table_name}_sub'
+ tb_name_lower = table_name_sub.lower()
+ tb_name_upper = table_name_sub.upper()
+
+ ## create stb and tb
+ tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))')
+ tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")')
+ tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")')
+ tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")')
+
+ ## insert values
+ tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")')
+ tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")')
+ tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")')
+ tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")')
+ tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")')
+ tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")')
+
+ ## query where tbname in single
+ tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")')
+ tdSql.checkRows(1)
+ tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")')
+ tdSql.checkRows(1)
+ tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")')
+ tdSql.checkRows(1)
+ tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")')
+ tdSql.checkRows(2)
+ tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")')
+ tdSql.checkRows(2)
+ tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")')
+ tdSql.checkRows(3)
+ tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")')
+ tdSql.checkRows(3)
+
+ ## query where tbname in multi
+ tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")')
+ tdSql.checkRows(1)
+ tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")')
+ tdSql.checkRows(6)
+
+ def run(self):
+ tdSql.prepare()
+ self.checkStbWhereIn()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py
index 08416ba3ed..30d90b1f9d 100644
--- a/tests/pytest/query/tbname.py
+++ b/tests/pytest/query/tbname.py
@@ -53,6 +53,9 @@ class TDTestCase:
"select * from cars where id=0 and tbname in ('carzero', 'cartwo')")
tdSql.checkRows(1)
+ tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')")
+ tdSql.checkRows(2)
+
"""
tdSql.query("select * from cars where tbname like 'car%'")
tdSql.checkRows(2)
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
index 643886f434..f069bb8f70 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
@@ -47,6 +47,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
+
# insert: create one or mutiple tables per sql and insert multiple rows per sql
# insert data from a special timestamp
# check stable stb0
@@ -89,6 +90,7 @@ class TDTestCase:
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " %
binPath)
+
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py
index 1d28a2708f..51b064a08e 100644
--- a/tests/pytest/tools/taosdemoPerformance.py
+++ b/tests/pytest/tools/taosdemoPerformance.py
@@ -49,24 +49,18 @@ class taosdemoPerformace:
def generateJson(self):
db = {
"name": "%s" % self.insertDB,
- "drop": "yes",
- "replica": 1
+ "drop": "yes"
}
stb = {
"name": "meters",
- "child_table_exists": "no",
"childtable_count": self.numOfTables,
"childtable_prefix": "stb_",
- "auto_create_table": "no",
- "data_source": "rand",
"batch_create_tbl_num": 10,
- "insert_mode": "taosc",
+ "insert_mode": "rand",
"insert_rows": self.numOfRows,
- "interlace_rows": 0,
- "max_sql_len": 1024000,
- "disorder_ratio": 0,
- "disorder_range": 1000,
+ "batch_rows": 1000000,
+ "max_sql_len": 1048576,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
@@ -100,11 +94,8 @@ class taosdemoPerformace:
"user": "root",
"password": "taosdata",
"thread_count": 10,
- "thread_count_create_tbl": 10,
+ "thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
- "confirm_parameter_prompt": "no",
- "insert_interval": 0,
- "num_of_records_per_req": 30000,
"databases": [db]
}
@@ -145,7 +136,7 @@ class taosdemoPerformace:
binPath = buildPath + "/build/bin/"
os.system(
- "%staosdemo -f %s > /dev/null 2>&1" %
+ "%sperfMonitor -f %s > /dev/null 2>&1" %
(binPath, self.generateJson()))
self.createTableTime = self.getCMDOutput(
"grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'")
diff --git a/tests/pytest/util/dnodes-default.py b/tests/pytest/util/dnodes-default.py
index 085e083149..8da36f3074 100644
--- a/tests/pytest/util/dnodes-default.py
+++ b/tests/pytest/util/dnodes-default.py
@@ -60,7 +60,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -320,7 +320,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
diff --git a/tests/pytest/util/dnodes-no-random-fail.py b/tests/pytest/util/dnodes-no-random-fail.py
index 2627575e61..a973f8da52 100644
--- a/tests/pytest/util/dnodes-no-random-fail.py
+++ b/tests/pytest/util/dnodes-no-random-fail.py
@@ -58,7 +58,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -318,7 +318,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py
index 4f4cdcc0d0..7cadca64a3 100644
--- a/tests/pytest/util/dnodes-random-fail.py
+++ b/tests/pytest/util/dnodes-random-fail.py
@@ -58,7 +58,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -318,7 +318,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 0f4919ba96..2abb8f5ee7 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -14,15 +14,16 @@
import sys
import os
import os.path
+import platform
import subprocess
from time import sleep
from util.log import *
class TDSimClient:
- def __init__(self):
+ def __init__(self, path):
self.testCluster = False
-
+ self.path = path
self.cfgDict = {
"numOfLogLines": "100000000",
"numOfThreadsPerCore": "2.0",
@@ -41,10 +42,7 @@ class TDSimClient:
"jnidebugFlag": "135",
"qdebugFlag": "135",
"telemetryReporting": "0",
- }
- def init(self, path):
- self.__init__()
- self.path = path
+ }
def getLogDir(self):
self.logDir = "%s/sim/psim/log" % (self.path)
@@ -61,7 +59,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -246,7 +244,7 @@ class TDDnode:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
- if ("taosd" in files):
+ if (("taosd") in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
@@ -404,7 +402,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
- cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
+ cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -480,8 +478,7 @@ class TDDnodes:
for i in range(len(self.dnodes)):
self.dnodes[i].init(self.path)
- self.sim = TDSimClient()
- self.sim.init(self.path)
+ self.sim = TDSimClient(self.path)
def setTestCluster(self, value):
self.testCluster = value
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index 0c93fe919a..556292b21b 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -313,6 +313,12 @@ if $rows != 6 then
return -1
endi
+print =============================> TD-6086
+sql create stable td6086st(ts timestamp, d double) tags(t nchar(50));
+sql create table td6086ct1 using td6086st tags("ct1");
+sql create table td6086ct2 using td6086st tags("ct2");
+sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname;
+
print ==================> td-2624
sql create table tm2(ts timestamp, k int, b binary(12));
sql insert into tm2 values('2011-01-02 18:42:45.326', -1,'abc');
@@ -1149,9 +1155,11 @@ endi
sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
-sql create table smeters (ts timestamp, current float, voltage int);
-sql insert into smeters values ('2021-08-08 10:10:10', 10, 1);
-sql insert into smeters values ('2021-08-08 10:10:12', 10, 2);
+sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int);
+sql create table smeter1 using smeters tags (1);
+sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2);
+sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2);
+sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1);
sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a);
if $rows != 2 then
@@ -1160,9 +1168,21 @@ endi
if $data00 != @21-08-08 10:10:10.000@ then
return -1
endi
+if $data01 != 0.000000000 then
+ return -1
+endi
if $data10 != @21-08-08 10:10:12.000@ then
return -1
endi
+if $data11 != 0.000000000 then
+ return -1
+endi
-
+sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10;
+if $rows != 1 then
+ return -1
+endi
+if $data00 != 0.000000000 then
+ return -1
+endi
diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim
index 74febff063..f192837bb7 100644
--- a/tests/script/general/parser/interp.sim
+++ b/tests/script/general/parser/interp.sim
@@ -68,7 +68,6 @@ print ================== server restart completed
run general/parser/interp_test.sim
-
print ================= TD-5931
sql create stable st5931(ts timestamp, f int) tags(t int)
sql create table ct5931 using st5931 tags(1)
@@ -76,6 +75,7 @@ sql create table nt5931(ts timestamp, f int)
sql select interp(*) from nt5931 where ts=now
sql select interp(*) from st5931 where ts=now
sql select interp(*) from ct5931 where ts=now
+
if $rows != 0 then
return -1
endi
diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim
index 845afb0173..5a2021dcfc 100644
--- a/tests/script/general/parser/interp_test.sim
+++ b/tests/script/general/parser/interp_test.sim
@@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then
endi
+sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear);
+if $rows != 8 then
+ return -1
+endi
+if $data00 != @18-09-17 20:35:00.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @18-09-17 20:36:00.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @18-09-17 20:37:00.000@ then
+ return -1
+endi
+if $data21 != NULL then
+ return -1
+endi
+if $data30 != @18-09-17 20:38:00.000@ then
+ return -1
+endi
+if $data31 != NULL then
+ return -1
+endi
+if $data40 != @18-09-17 20:39:00.000@ then
+ return -1
+endi
+if $data41 != NULL then
+ return -1
+endi
+if $data50 != @18-09-17 20:40:00.000@ then
+ return -1
+endi
+if $data51 != 0 then
+ return -1
+endi
+if $data60 != @18-09-17 20:41:00.000@ then
+ return -1
+endi
+if $data61 != NULL then
+ return -1
+endi
+if $data70 != @18-09-17 20:42:00.000@ then
+ return -1
+endi
+if $data71 != NULL then
+ return -1
+endi
+sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc;
+if $rows != 8 then
+ return -1
+endi
+if $data00 != @18-09-17 20:42:00.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @18-09-17 20:41:00.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @18-09-17 20:40:00.000@ then
+ return -1
+endi
+if $data21 != 0 then
+ return -1
+endi
+if $data30 != @18-09-17 20:39:00.000@ then
+ return -1
+endi
+if $data31 != NULL then
+ return -1
+endi
+if $data40 != @18-09-17 20:38:00.000@ then
+ return -1
+endi
+if $data41 != NULL then
+ return -1
+endi
+if $data50 != @18-09-17 20:37:00.000@ then
+ return -1
+endi
+if $data51 != NULL then
+ return -1
+endi
+if $data60 != @18-09-17 20:36:00.000@ then
+ return -1
+endi
+if $data61 != NULL then
+ return -1
+endi
+if $data70 != @18-09-17 20:35:00.000@ then
+ return -1
+endi
+if $data71 != NULL then
+ return -1
+endi
+
+sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts;
+if $rows != 9 then
+ return -1
+endi
+if $data00 != @18-09-17 20:34:00.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @18-09-17 20:36:00.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @18-09-17 20:38:00.000@ then
+ return -1
+endi
+if $data21 != NULL then
+ return -1
+endi
+if $data30 != @18-09-17 20:40:00.000@ then
+ return -1
+endi
+if $data31 != 0.00000 then
+ return -1
+endi
+if $data40 != @18-09-17 20:42:00.000@ then
+ return -1
+endi
+if $data41 != 0.20000 then
+ return -1
+endi
+if $data50 != @18-09-17 20:44:00.000@ then
+ return -1
+endi
+if $data51 != 0.40000 then
+ return -1
+endi
+if $data60 != @18-09-17 20:46:00.000@ then
+ return -1
+endi
+if $data61 != 0.60000 then
+ return -1
+endi
+if $data70 != @18-09-17 20:48:00.000@ then
+ return -1
+endi
+if $data71 != 0.80000 then
+ return -1
+endi
+if $data80 != @18-09-17 20:50:00.000@ then
+ return -1
+endi
+if $data81 != 1.00000 then
+ return -1
+endi
+
+
+sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts;
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @18-09-17 20:33:00.000@ then
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data10 != @18-09-17 20:36:00.000@ then
+ return -1
+endi
+if $data11 != NULL then
+ return -1
+endi
+if $data20 != @18-09-17 20:39:00.000@ then
+ return -1
+endi
+if $data21 != NULL then
+ return -1
+endi
+if $data30 != @18-09-17 20:42:00.000@ then
+ return -1
+endi
+if $data31 != 0.20000 then
+ return -1
+endi
+if $data40 != @18-09-17 20:45:00.000@ then
+ return -1
+endi
+if $data41 != 0.50000 then
+ return -1
+endi
+if $data50 != @18-09-17 20:48:00.000@ then
+ return -1
+endi
+if $data51 != 0.80000 then
+ return -1
+endi
+
+sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc;
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @18-09-17 20:48:00.000@ then
+ return -1
+endi
+if $data01 != 0.80000 then
+ return -1
+endi
+if $data10 != @18-09-17 20:45:00.000@ then
+ return -1
+endi
+if $data11 != 0.50000 then
+ return -1
+endi
+if $data20 != @18-09-17 20:42:00.000@ then
+ return -1
+endi
+if $data21 != 0.20000 then
+ return -1
+endi
+if $data30 != @18-09-17 20:39:00.000@ then
+ return -1
+endi
+if $data31 != NULL then
+ return -1
+endi
+if $data40 != @18-09-17 20:36:00.000@ then
+ return -1
+endi
+if $data41 != NULL then
+ return -1
+endi
+if $data50 != @18-09-17 20:33:00.000@ then
+ return -1
+endi
+if $data51 != NULL then
+ return -1
+endi
diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim
index 23b85095c5..3af2cb3018 100644
--- a/tests/script/general/parser/limit.sim
+++ b/tests/script/general/parser/limit.sim
@@ -75,4 +75,9 @@ sleep 100
run general/parser/limit_tb.sim
run general/parser/limit_stb.sim
+print ========> TD-6017
+sql use $db
+sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
+sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim
index 0c987d88c9..4a93797d40 100644
--- a/tests/script/general/parser/limit_tb.sim
+++ b/tests/script/general/parser/limit_tb.sim
@@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
endi
+
+print ========> TD-6017
+sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
+
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01
diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim
index 65bb89d549..db27886bbf 100644
--- a/tests/script/general/parser/tbnameIn_query.sim
+++ b/tests/script/general/parser/tbnameIn_query.sim
@@ -101,6 +101,30 @@ if $data11 != 2 then
return -1
endi
+## tbname in can accpet Upper case table name
+sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
+if $rows != 3 then
+ return -1
+endi
+if $data00 != 10 then
+ return -1
+endi
+if $data01 != 0 then
+ return -1
+endi
+if $data10 != 10 then
+ return -1
+endi
+if $data11 != 1 then
+ return -1
+endi
+if $data20 != 10 then
+ return -1
+endi
+if $data21 != 2 then
+ return -1
+endi
+
# multiple tbname in is not allowed NOW
sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
#if $rows != 4 then
diff --git a/tests/script/http/httpTest.c b/tests/script/http/httpTest.c
new file mode 100644
index 0000000000..36ce6b95ba
--- /dev/null
+++ b/tests/script/http/httpTest.c
@@ -0,0 +1,128 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define MAXLINE 1024
+
+typedef struct {
+ pthread_t pid;
+ int threadId;
+ int rows;
+ int tables;
+} ThreadObj;
+
+void post(char *ip,int port,char *page,char *msg) {
+ int sockfd,n;
+ char recvline[MAXLINE];
+ struct sockaddr_in servaddr;
+ char content[4096];
+ char content_page[50];
+ sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
+ char content_host[50];
+ sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
+ char content_type[] = "Content-Type: text/plain\r\n";
+ char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
+ char content_len[50];
+ sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg));
+ sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
+ if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
+ printf("socket error\n");
+ }
+ bzero(&servaddr,sizeof(servaddr));
+ servaddr.sin_family = AF_INET;
+ servaddr.sin_port = htons(port);
+ if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
+ printf("inet_pton error\n");
+ }
+ if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
+ printf("connect error\n");
+ }
+ write(sockfd,content,strlen(content));
+ printf("%s\n", content);
+ while((n = read(sockfd,recvline,MAXLINE)) > 0) {
+ recvline[n] = 0;
+ if(fputs(recvline,stdout) == EOF) {
+ printf("fputs error\n");
+ }
+ }
+ if(n < 0) {
+ printf("read error\n");
+ }
+}
+
+void singleThread() {
+ char ip[] = "127.0.0.1";
+ int port = 6041;
+ char page[] = "rest/sql";
+ char page1[] = "rest/sql/db1";
+ char page2[] = "rest/sql/db2";
+ char nonexit[] = "rest/sql/xxdb";
+
+ post(ip,port,page,"drop database if exists db1");
+ post(ip,port,page,"create database if not exists db1");
+ post(ip,port,page,"drop database if exists db2");
+ post(ip,port,page,"create database if not exists db2");
+ post(ip,port,page1,"create table t11 (ts timestamp, c1 int)");
+ post(ip,port,page2,"create table t21 (ts timestamp, c1 int)");
+ post(ip,port,page1,"insert into t11 values (now, 1)");
+ post(ip,port,page2,"insert into t21 values (now, 2)");
+ post(ip,port,nonexit,"create database if not exists db3");
+}
+
+void execute(void *params) {
+ char ip[] = "127.0.0.1";
+ int port = 6041;
+ char page[] = "rest/sql";
+ char *unique = calloc(1, 1024);
+ char *sql = calloc(1, 1024);
+ ThreadObj *pThread = (ThreadObj *)params;
+ printf("Thread %d started\n", pThread->threadId);
+ sprintf(unique, "rest/sql/db%d",pThread->threadId);
+ sprintf(sql, "drop database if exists db%d", pThread->threadId);
+ post(ip,port,page, sql);
+ sprintf(sql, "create database if not exists db%d", pThread->threadId);
+ post(ip,port,page, sql);
+ for (int i = 0; i < pThread->tables; i++) {
+ sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
+ post(ip,port,unique, sql);
+ }
+ for (int i = 0; i < pThread->rows; i++) {
+ sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
+ post(ip,port,unique, sql);
+ }
+ free(unique);
+ free(sql);
+ return;
+}
+
+void multiThread() {
+ int numOfThreads = 100;
+ int numOfTables = 100;
+ int numOfRows = 1;
+ ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
+ for (int i = 0; i < numOfThreads; i++) {
+ ThreadObj *pthread = threads + i;
+ pthread_attr_t thattr;
+ pthread->threadId = i + 1;
+ pthread->rows = numOfRows;
+ pthread->tables = numOfTables;
+ pthread_attr_init(&thattr);
+ pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread);
+ }
+ for (int i = 0; i < numOfThreads; i++) {
+ pthread_join(threads[i].pid, NULL);
+ }
+ free(threads);
+}
+
+int main() {
+ singleThread();
+ multiThread();
+ exit(0);
+}
\ No newline at end of file
diff --git a/tests/script/http/makefile b/tests/script/http/makefile
new file mode 100644
index 0000000000..d1be683eda
--- /dev/null
+++ b/tests/script/http/makefile
@@ -0,0 +1,2 @@
+all:
+ gcc -g httpTest.c -o httpTest -lpthread
\ No newline at end of file
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index b087c734f3..4dff639379 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -90,6 +90,14 @@ cd ../../../debug; make
./test.sh -f general/parser/function.sim
./test.sh -f unique/cluster/vgroup100.sim
+./test.sh -f unique/http/admin.sim
+./test.sh -f unique/http/opentsdb.sim
+
+./test.sh -f unique/import/replica2.sim
+./test.sh -f unique/import/replica3.sim
+
+./test.sh -f general/alter/cached_schema_after_alter.sim
+
#======================b1-end===============
#======================b2-start===============