Merge branch 'develop' into feature/szhou/schemaless

This commit is contained in:
shenglian zhou 2021-08-25 17:11:28 +08:00
commit 3e8b0e856d
92 changed files with 4961 additions and 807 deletions

View File

@ -23,6 +23,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_bionic
@ -150,6 +151,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_trusty
@ -176,6 +178,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_xenial
@ -201,7 +204,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_bionic
@ -226,6 +229,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_centos7
@ -249,4 +253,4 @@ steps:
branch:
- develop
- master
- 2.0

View File

@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.1.6.0")
SET(TD_VER_NUMBER "2.1.7.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes;
char *pBuf;
char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes);
if (!pBuf) {
@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf);
return NULL;
}
pBuf = realloc(pBuf, nBytes+1);
return pBuf;
pBuf1 = realloc(pBuf, nBytes+1);
if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
return pBuf1;
}
int CountCharacters(const char *string, UINT cp) {

View File

@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0;
char **ppszArg;
char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE;
ppszArg[argc++] = pszCopy+j;
ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
if(ppszArg1 == NULL && ppszArg != NULL)
free(ppszArg);
ppszArg = ppszArg1;
if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0';
}
@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0';
}
realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
//realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */

View File

@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
int iErr;
const char *pc;
@ -242,8 +243,11 @@ realpath_failed:
return NULL;
}
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
return pOutbuf;
if (!outbuf) {
pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
return pOutbuf1;
}
#endif
@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
char *pPath1 = NULL;
char *pPath2 = NULL;
int iErr;
@ -590,10 +595,13 @@ realpathU_failed:
}
DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
if (!outbuf) {
pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
free(pPath1);
free(pPath2);
return pOutbuf;
return pOutbuf1;
}
#endif /* defined(_WIN32) */

2
deps/TSZ vendored

@ -1 +1 @@
Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5

View File

@ -21,7 +21,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发
## <a class="anchor" id="scenes"></a>TDengine 总体适用场景
作为一个 IOT 大数据平台TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRMERP 等,不在本文讨论范围内。
作为一个 IoT 大数据平台TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRMERP 等,不在本文讨论范围内。
### 数据源特点和需求
@ -54,7 +54,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发
|系统性能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---|
|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
### 系统维护需求

View File

@ -1,6 +1,6 @@
# 通过 Docker 快速体验 TDengine
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine而无需安装虚拟机或额外租用 Linux 服务器。
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine而无需安装虚拟机或额外租用 Linux 服务器。另外从2.0.14.0版本开始TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c
```bash
$ docker -v
Docker version 20.10.5, build 55c4c88
Docker version 20.10.3, build 48d30b5
```
## 在 Docker 容器中运行 TDengine
@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88
1使用命令拉取 TDengine 镜像,并使它在后台运行。
```bash
$ docker run -d tdengine/tdengine
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
$ docker run -d --name tdengine tdengine/tdengine
7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
```
- **docker run**:通过 Docker 运行一个容器。
- **-d**:让容器在后台运行。
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID我们可以通过容器 ID 来查看对应的容器。
- **docker run**:通过 Docker 运行一个容器
- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器
- **-d**:让容器在后台运行
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID我们也可以通过容器 ID 来查看对应的容器
2确认容器是否已经正确运行。
```bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
```
- **docker ps**:列出所有正在运行状态的容器信息。
@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
3进入 Docker 容器内,使用 TDengine。
```bash
$ docker exec -it cdf548465318 /bin/bash
root@cdf548465318:~/TDengine-server-2.0.13.0#
$ docker exec -it tdengine /bin/bash
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
- **-i**:进入交互模式。
- **-t**:指定一个终端。
- **cdf548465318**:容器 ID需要根据 docker ps 指令返回的值进行修改。
- **c452519b0f9b**:容器 ID需要根据 docker ps 指令返回的值进行修改。
- **/bin/bash**:载入容器后运行 bash 来进行交互。
4进入容器后执行 taos shell 客户端程序。
```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
taos>
```
TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
```bash
$ taos> q
root@cdf548465318:~/TDengine-server-2.0.13.0#
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
```
2在命令行界面执行 taosdemo。
```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
###################################################################
# Server IP: localhost:0
# User: root
# Password: taosdata
# Use metric: true
# Datatype of Columns: int int int int int int int float
# Binary Length(If applicable): -1
# Number of Columns per record: 3
# Number of Threads: 10
# Number of Tables: 10000
# Number of Data per Table: 100000
# Records/Request: 1000
# Database name: test
# Table prefix: t
# Delete method: 0
# Test time: 2021-04-13 02:05:20
###################################################################
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
taosdemo is simulating data generated by power equipments monitoring...
host: 127.0.0.1:6030
user: root
password: taosdata
configDir:
resultFile: ./output.txt
thread num of insert data: 10
thread num of create table: 10
top insert interval: 0
number of records per req: 30000
max sql length: 1048576
database count: 1
database[0]:
database[0] name: test
drop: yes
replica: 1
precision: ms
super table count: 1
super table[0]:
stbName: meters
autoCreateTable: no
childTblExists: no
childTblCount: 10000
childTblPrefix: d
dataSource: rand
iface: taosc
insertRows: 10000
interlaceRows: 0
disorderRange: 1000
disorderRatio: 0
maxSqlLen: 1048576
timeStampStep: 1
startTimestamp: 2017-07-14 10:40:00.000
sampleFormat:
sampleFile:
tagsFile:
columnCount: 3
column[0]:FLOAT column[1]:INT column[2]:FLOAT
tagCount: 2
tag[0]:INT tag[1]:BINARY(16)
Press enter key to continue or Ctrl-C to stop
```
回车后,该命令将新建一个数据库 test并且自动创建一张超级表 meters并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1f2f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAGareaid 被设置为 1 到 10loc 被设置为 "beijing" 或 "shanghai"。
回车后,该命令将在数据库 test 下面自动创建一张超级表 meters该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupIdgroupId 被设置为 1 到 10 location 被设置为 "beijing" 或者 "shanghai"。
执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
3进入 TDengine 终端,查看 taosdemo 生成的数据。
- **进入命令行。**
```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos>
taos>
```
- **查看数据库。**
@ -124,8 +154,8 @@ taos>
```bash
$ taos> show databases;
name | created_time | ntables | vgroups | ···
test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
```
@ -136,10 +166,10 @@ $ taos> use test;
Database changed.
$ taos> show stables;
name | created_time | columns | tags | tables |
=====================================================================================
meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001737s)
name | created_time | columns | tags | tables |
============================================================================================
meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.003259s)
```
@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s)
```bash
$ taos> select * from test.t0 limit 10;
ts | f1 | f2 | f3 |
====================================================================
2017-07-14 02:40:01.000 | 3 | 9 | 0 |
2017-07-14 02:40:02.000 | 0 | 1 | 2 |
2017-07-14 02:40:03.000 | 7 | 2 | 3 |
2017-07-14 02:40:04.000 | 9 | 4 | 5 |
2017-07-14 02:40:05.000 | 1 | 2 | 5 |
2017-07-14 02:40:06.000 | 6 | 3 | 2 |
2017-07-14 02:40:07.000 | 4 | 7 | 8 |
2017-07-14 02:40:08.000 | 4 | 6 | 6 |
2017-07-14 02:40:09.000 | 5 | 7 | 7 |
2017-07-14 02:40:10.000 | 1 | 5 | 0 |
Query OK, 10 row(s) in set (0.003638s)
DB error: Table does not exist (0.002857s)
taos> select * from test.d0 limit 10;
ts | current | voltage | phase |
======================================================================================
2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
Query OK, 10 row(s) in set (0.016791s)
```
- **查看 t0 表的标签值。**
- **查看 d0 表的标签值。**
```bash
$ taos> select areaid, loc from test.t0;
areaid | loc |
===========================
10 | shanghai |
Query OK, 1 row(s) in set (0.002904s)
$ taos> select groupid, location from test.d0;
groupid | location |
=================================
0 | shanghai |
Query OK, 1 row(s) in set (0.003490s)
```
## 停止正在 Docker 中运行的 TDengine 服务
```bash
$ docker stop cdf548465318
cdf548465318
$ docker stop tdengine
tdengine
```
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
- **cdf548465318**:容器 ID根据 docker ps 指令返回的结果进行修改
- **tdengine**:容器名称
## 编程开发时连接在 Docker 中的 TDengine
@ -195,7 +228,7 @@ $ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
```
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
2直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
```bash
$ docker exec -it 526aa188da /bin/bash
$ docker exec -it tdengine /bin/bash
```

View File

@ -2,7 +2,7 @@
# TDengine数据建模
TDengine采用关系型数据模型需要建库、建表。因此对于一个具体的应用场景需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
TDengine采用关系型数据模型需要建库、建表。因此对于一个具体的应用场景需要考虑库超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。

View File

@ -2,8 +2,6 @@
## 总体介绍
TDengine 提供了遵循 JDBC 标准3.0API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
![tdengine-connector](page://images/tdengine-jdbc-connector.png)
@ -14,12 +12,10 @@ TDengine 提供了遵循 JDBC 标准3.0API 规范的 `taos-jdbcdriver` 实
* RESTful应用将 SQL 发送给位于物理节点2pnode2上的 RESTful 连接器,再调用客户端 APIlibtaos.so
* JDBC-RESTfulJava 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求发送给物理节点2的 RESTful 连接器。
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
* 目前不支持嵌套查询nested query
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询taos-jdbcdriver 会自动关闭上一个 ResultSet。
### JDBC-JNI和JDBC-RESTful的对比
@ -50,9 +46,12 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
</tr>
</table>
注意:与 JNI 方式不同RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
注意:与 JNI 方式不同RESTful 接口是无状态的。在使用JDBC-RESTful时需要在sql中指定表、超级表的数据库名称。例如
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
```
### <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
@ -65,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
### TDengine DataType 和 Java DataType
## TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
@ -82,36 +81,27 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| BINARY | byte array |
| NCHAR | java.lang.String |
## 安装
## 安装Java Connector
Java连接器支持的系统有 Linux 64/Windows x64/Windows x86。
**安装前准备:**
- 已安装TDengine服务器端
- 已安装好TDengine应用驱动具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节
TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。
由于 TDengine 的应用驱动是使用C语言开发的使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
### 安装前准备
使用Java Connector连接数据库前需要具备以下条件
1. Linux或Windows操作系统
2. Java 1.8以上运行时环境
3. TDengine-client使用JDBC-JNI时必须使用JDBC-RESTful时非必须
**注意**:由于 TDengine 的应用驱动是使用C语言开发的使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
### 如何获取 TAOS-JDBCDriver
**maven仓库**
**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
### 通过maven获取JDBC driver
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
maven 项目中使用如下 pom.xml 配置即可
maven 项目中在pom.xml 中添加以下依赖
```xml-dtd
<dependency>
<groupId>com.taosdata.jdbc</groupId>
@ -119,39 +109,22 @@ maven 项目中使用如下 pom.xml 配置即可:
<version>2.0.18</version>
</dependency>
```
**源码编译打包**
下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
### 通过源码编译获取JDBC driver
### 示例程序
示例程序源码位于install_directory/examples/JDBC有如下目录
JDBCDemo JDBC示例源程序
JDBCConnectorChecker JDBC安装校验源程序及jar包
Springbootdemo springboot示例源程序
SpringJdbcTemplate SpringJDBC模板
### 安装验证
运行如下指令:
```Bash
cd {install_directory}/examples/JDBC/JDBCConnectorChecker
java -jar JDBCConnectorChecker.jar -host <fqdn>
可以通过下载TDengine的源码自己编译最新版本的java connector
```shell
git clone https://github.com/taosdata/TDengine.git
cd TDengine/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true
```
验证通过将打印出成功信息。
编译后在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。
## Java连接器的使用
### 获取连接
#### 指定URL获取连接
通过指定URL获取连接如下所示
```java
@ -159,23 +132,19 @@ Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库Linux 下是 libtaos.soWindows 下是 taos.dll
@ -194,6 +163,9 @@ url中的配置参数如下
* charset客户端使用的字符集默认值为系统字符集。
* locale客户端语言环境默认值系统当前 locale。
* timezone客户端使用的时区默认值为系统当前时区。
* batchfetch: 仅在使用JDBC-JNI时生效。true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。
* timestampFormat: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP'结果集中timestamp类型的字段为一个long值; 'UTC'结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING'结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
* batchErrorIgnoretrue在执行Statement的executeBatch时如果中间有一条sql执行失败继续执行下面的sql。false不再执行失败sql后的任何语句。默认值为false。
#### 指定URL和Properties获取连接
@ -222,11 +194,13 @@ properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_CHARSET客户端使用的字符集默认值为系统字符集。
* TSDBDriver.PROPERTY_KEY_LOCALE客户端语言环境默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE客户端使用的时区默认值为系统当前时区。
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP'结果集中timestamp类型的字段为一个long值; 'UTC'结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING'结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNOREtrue在执行Statement的executeBatch时如果中间有一条sql执行失败继续执行下面的sql。false不再执行失败sql后的任何语句。默认值为false。
#### 使用客户端配置文件建立连接
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示
1. 在 Java 应用中不指定 hostname 和 port
```java
@ -243,7 +217,6 @@ public Connection getConn() throws Exception{
```
2. 在配置文件中指定 firstEp 和 secondEp
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@ -424,9 +397,9 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws
```
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
### <a class="anchor" id="subscribe"></a>订阅
## <a class="anchor" id="subscribe"></a>订阅
#### 创建
### 创建
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
@ -440,7 +413,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
#### 消费数据
### 消费数据
```java
int total = 0;
@ -458,7 +431,7 @@ while(true) {
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
#### 关闭订阅
### 关闭订阅
```java
sub.close(true);
@ -466,7 +439,7 @@ sub.close(true);
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
### 关闭资源
## 关闭资源
```java
resultSet.close();
@ -478,19 +451,8 @@ conn.close();
## 与连接池使用
**HikariCP**
* 引入相应 HikariCP maven 依赖:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* 使用示例如下:
### HikariCP
使用示例如下:
```java
public static void main(String[] args) throws SQLException {
@ -522,19 +484,8 @@ conn.close();
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。
**Druid**
* 引入相应 Druid maven 依赖:
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* 使用示例如下:
### Druid
使用示例如下:
```java
public static void main(String[] args) throws Exception {
@ -580,6 +531,16 @@ Query OK, 1 row(s) in set (0.000141s)
* Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
## 示例程序
示例程序源码位于TDengine/tests/examples/JDBC下:
* JDBCDemoJDBC示例源程序
* JDBCConnectorCheckerJDBC安装校验源程序及jar包
* Springbootdemospringboot示例源程序
* SpringJdbcTemplateSpringJDBC模板
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## 常见问题
* java.lang.UnsatisfiedLinkError: no taos in java.library.path

View File

@ -315,10 +315,6 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
1. 调用 `taos_stmt_init` 创建参数绑定对象;
2. 调用 `taos_stmt_prepare` 解析 INSERT 语句;
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS那么调用 `taos_stmt_set_tbname` 来设置表名;
* 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下:
1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta
2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名;
3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值;
5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值;
6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理;
@ -362,12 +358,6 @@ typedef struct TAOS_BIND {
2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)`
2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名)
当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。
*注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
@ -976,13 +966,17 @@ Go连接器支持的系统有
**提示建议Go版本是1.13及以上,并开启模块支持:**
```sh
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct
```
在taosdemo.go所在目录下进行编译和执行
```sh
go mod init *demo*
go build ./demo -h fqdn -p serverPort
go mod init taosdemo
go get github.com/taosdata/driver-go/taosSql
# use win branch in Windows platform.
#go get github.com/taosdata/driver-go/taosSql@win
go build
./taosdemo -h fqdn -p serverPort
```
### Go连接器的使用

View File

@ -375,7 +375,7 @@ taos -C 或 taos --dump-config
timezone GMT-8
timezone Asia/Shanghai
```
均是合法的设置东八区时区的格式。
均是合法的设置东八区时区的格式。但需注意Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容时间戳字符串、关键词now的解析产生影响。例如
```sql
@ -800,7 +800,7 @@ taos -n sync -P 6042 -h <fqdn of server>
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
从 2.1.7.0 版本开始taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
从 2.1.8.0 版本开始taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
-n设为“speed”时表示对网络速度进行诊断。
-h所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
@ -809,6 +809,15 @@ taos -n sync -P 6042 -h <fqdn of server>
-l单个网络包的大小单位字节。最小值是 1024、最大值是 1024*1024*1024默认值为 1000。
-S网络封包的类型。可以是 TCP 或 UDP默认值为 TCP。
#### FQDN 解析速度诊断
`taos -n fqdn -h <fqdn of server>`
从 2.1.8.0 版本开始taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
-n设为“fqdn”时表示对 FQDN 解析进行诊断。
-h所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
#### 服务端日志
taosd 服务端日志文件标志位 debugflag 默认为 131在 debug 时往往需要将其提升到 135 或 143 。

View File

@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
显示当前数据库下的所有数据表信息。
说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
通配符匹配1'%'百分号匹配0到任意个字符2'\_'下划线匹配单个任意字符。
- **显示一个数据表的创建语句**
```mysql
@ -718,15 +714,19 @@ Query OK, 1 row(s) in set (0.001091s)
| = | equal to | all types |
| <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
| in | matches any value in a set | all types except first column `timestamp` |
| in | match any value in a set | all types except first column `timestamp` |
| like | match a wildcard string | **`binary`** **`nchar`** |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`
4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明BOOL 类型写作 `{true, false}``{0, 1}` 均可,但不能写作 0、1 之外的整数FLOAT 和 DOUBLE 类型会受到浮点数精度影响集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
2. like 算子使用通配符字符串进行匹配检查。
* 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。
* 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`
5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明BOOL 类型写作 `{true, false}``{0, 1}` 均可,但不能写作 0、1 之外的整数FLOAT 和 DOUBLE 类型会受到浮点数精度影响集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
<a class="anchor" id="union"></a>
### UNION ALL 操作符
@ -1197,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
说明与LAST函数不同LAST_ROW不支持时间范围限制强制返回最后一条记录。
限制LAST_ROW()不能与INTERVAL一起使用。
示例:

View File

@ -203,7 +203,7 @@ The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system.
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase.
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL statements when an error happens during execution of the executeBatch method in Statement; false if the remaining SQL statements are not executed. Default value is false.
#### Establishing a connection with configuration file
@ -317,14 +317,17 @@ Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly impr
Statement stmt = conn.createStatement();
Random r = new Random();
// In the INSERT statement, the VALUES clause allows you to specify a specific column; If automatic table creation is adopted, the TAGS clause needs to set the parameter values of all TAGS columns
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
s.setTableName("w1");
// set tags
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
// set values
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
@ -341,9 +344,10 @@ for (int i = 0; i < numOfRows; i++){
}
s.setString(2, s2, 10);
// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch
s.columnDataAddBatch();
s.columnDataExecuteBatch();
// Clear the cache, after which you can bind new data(including table names, tags, values):
s.columnDataClearBatch();
s.columnDataCloseBatch();
```
@ -499,6 +503,10 @@ Query OK, 1 row(s) in set (0.000141s)
- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate.
- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate.
## Example Codes
you see sample code here: ![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## FAQ
- java.lang.UnsatisfiedLinkError: no taos in java.library.path

View File

@ -142,6 +142,7 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/set_core || :
fi
@ -167,6 +168,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
fi

View File

@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.1.6.0'
version: '2.1.7.1'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.6.0
- usr/lib/libtaos.so.2.1.7.1
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
tsBnDnodes.maxSize = dnodesNum * 2;
tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
int32_t maxSize = dnodesNum * 2;
SDnodeObj** list1 = NULL;
int32_t retry = 0;
while(list1 == NULL && retry++ < 3) {
list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
}
if(list1) {
tsBnDnodes.list = list1;
tsBnDnodes.maxSize = maxSize;
}
}
}

View File

@ -4,6 +4,8 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(jni)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)

View File

@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy);
int tsInsertInitialCheck(SSqlObj *pSql);
void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
void tscFreeRetrieveSup(SSqlObj *pSql);
#ifdef __cplusplus
}
#endif

View File

@ -144,6 +144,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);

View File

@ -38,6 +38,11 @@ extern "C" {
#include "qUtil.h"
#include "tcmdtype.h"
typedef enum {
TAOS_REQ_FROM_SHELL,
TAOS_REQ_FROM_HTTP
} SReqOrigin;
// forward declaration
struct SSqlInfo;
@ -123,7 +128,7 @@ typedef struct {
int32_t kvLen; // len of SKVRow
} SMemRowInfo;
typedef struct {
uint8_t memRowType; // default is 0, that is SDataRow
uint8_t memRowType; // default is 0, that is SDataRow
uint8_t compareStat; // 0 no need, 1 need compare
TDRowTLenT kvRowInitLen;
SMemRowInfo *rowInfo;
@ -340,6 +345,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj
SReqOrigin from;
} STscObj;
typedef struct SSubqueryState {

View File

@ -1693,7 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
SInsertStatementParam *pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam);
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);

View File

@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self;
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);

View File

@ -40,6 +40,7 @@
#include "qScript.h"
#include "ttype.h"
#include "qFilter.h"
#include "httpInt.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -1671,8 +1672,28 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
static char* cloneCurrentDBName(SSqlObj* pSql) {
char *p = NULL;
HttpContext *pCtx = NULL;
pthread_mutex_lock(&pSql->pTscObj->mutex);
char *p = strdup(pSql->pTscObj->db);
STscObj *pTscObj = pSql->pTscObj;
switch (pTscObj->from) {
case TAOS_REQ_FROM_HTTP:
pCtx = pSql->param;
if (pCtx && pCtx->db[0] != '\0') {
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
assert(len <= sizeof(db));
p = strdup(db);
}
break;
default:
break;
}
if (p == NULL) {
p = strdup(pSql->pTscObj->db);
}
pthread_mutex_unlock(&pSql->pTscObj->mutex);
return p;
@ -2033,9 +2054,10 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
bool hasDistinct = false;
bool hasAgg = false;
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
int32_t distIdx = -1;
for (int32_t i = 0; i < numOfExpr; ++i) {
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
@ -2090,7 +2112,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
}
}
//TODO(dengyihao), refactor as function
//handle distinct func mixed with other func
if (hasDistinct == true) {
@ -2106,6 +2127,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
if (pQueryInfo->pDownstream != NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
pQueryInfo->distinct = true;
}
@ -2629,7 +2651,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
}
}
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
@ -2663,8 +2685,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
}
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
return TSDB_CODE_SUCCESS;
}
@ -3046,7 +3068,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
}
}
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
return TSDB_CODE_SUCCESS;
}
@ -4644,7 +4665,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL;
if (type) {
*type |= TSQL_EXPR_JOIN;
@ -5626,6 +5647,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg3 = "top/bottom not support fill";
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
@ -5664,6 +5686,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
}
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV;
if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@ -5768,14 +5793,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
const char* msg8 = "only column in groupby clause allowed as order column";
const char* msg9 = "orderby column must projected in subquery";
const char* msg10 = "not support distinct mixed with order by";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
if (pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
SArray* pSortOrder = pSqlNode->pSortOrder;
@ -5795,6 +5819,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg2);
}
}
if (size > 0 && pQueryInfo->distinct) {
return invalidOperationMsg(pMsgBuf, msg10);
}
// handle the first part of order by
tVariant* pVar = taosArrayGet(pSortOrder, 0);
@ -5863,12 +5890,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) {
/* order of top/bottom query in interval is not valid */
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, pos);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
@ -5959,13 +5988,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg8);
}
} else {
/* order of top/bottom query in interval is not valid */
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, pos);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5);
}
@ -8672,6 +8701,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
n += 1;
}
info->numOfColumns = n;
return meta;
}
@ -8700,7 +8731,6 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
return code;
}
// create dummy table meta info
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
if (pTableMetaInfo1 == NULL) {

View File

@ -892,7 +892,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql);

View File

@ -2038,17 +2038,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
tscAsyncResultOnError(pSql);
}
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL);
SRetrieveSupport* pSupport = pSub->param;
tfree(pSupport->localBuffer);
tfree(pSupport);
tscFreeRetrieveSup(pSub);
taos_free_result(pSub);
}
@ -2406,6 +2403,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} else {
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
assert(ti >= 0);
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
tscColumnCopy(x, pCol);
}
}
}
@ -2607,7 +2608,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
static void tscFreeRetrieveSup(SSqlObj *pSql) {
void tscFreeRetrieveSup(SSqlObj *pSql) {
SRetrieveSupport *trsupport = pSql->param;
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
@ -2765,27 +2766,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
int32_t code = pParentSql->res.code;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
// remove the cached tableMeta and vgroup id list, and then parse the sql again
tscResetSqlCmd( &pParentSql->cmd, true, pParentSql->self);
SSqlObj *userSql = NULL;
if (pParentSql->param) {
userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql;
}
pParentSql->retry++;
pParentSql->res.code = TSDB_CODE_SUCCESS;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
if (userSql == NULL) {
userSql = pParentSql;
}
code = tsParseSql(pParentSql, true);
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
if (userSql != pParentSql) {
tscFreeRetrieveSup(pParentSql);
}
tscFreeSubobj(userSql);
tfree(userSql->pSubs);
userSql->res.code = TSDB_CODE_SUCCESS;
userSql->retry++;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
tstrerror(code), userSql->retry);
tscResetSqlCmd(&userSql->cmd, true, userSql->self);
code = tsParseSql(userSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
}
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
userSql->res.code = code;
tscAsyncResultOnError(userSql);
return;
}
executeQuery(pParentSql, pQueryInfo);
pQueryInfo = tscGetQueryInfo(&userSql->cmd);
executeQuery(userSql, pQueryInfo);
} else {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
}
@ -2855,7 +2872,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code;

View File

@ -403,6 +403,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TS) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
return i;
}
}
return -1;
}
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@ -659,8 +680,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
if(buffer == NULL)
return ;
pRes->buffer[i] = buffer;
// string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
@ -1236,6 +1259,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
}
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL;
@ -3634,10 +3658,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->distinct = pQueryInfo->distinct;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
pNewQueryInfo->distinct = pQueryInfo->distinct;
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -3853,8 +3877,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
int32_t index = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index);
tfree(ps);
pSql->param = NULL;
tscFreeRetrieveSup(pSql);
if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
@ -3863,7 +3886,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
// todo refactor
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
tscAsyncResultOnError(pParentSql);
return;
}
tscFreeSubobj(pParentSql);
tfree(pParentSql->pSubs);
pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++;
@ -3871,6 +3900,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
@ -3905,9 +3937,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
}
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
assert(pSql->subState.numOfSub == 0);
pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
assert(pSql->pSubs == NULL);
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
assert(pSql->subState.states == NULL);
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
code = pthread_mutex_init(&pSql->subState.mutex, NULL);
@ -3933,6 +3967,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->sqlstr = strdup(pSql->sqlstr);
pNew->fp = tscSubqueryCompleteCallback;
pNew->maxRetry = pSql->maxRetry;
pNew->cmd.resColumnId = TSDB_RES_COL_ID;
tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
@ -4490,10 +4527,14 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
assert(*ppChild != NULL);
STableMeta* p = *ppSTable;
STableMeta* pChild = *ppChild;
size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
if (p != NULL && sz != 0) {
memset((char *)p, 0, sz);
}
STableMeta* pChild1;
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
*ppSTable = p;
@ -4504,7 +4545,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
pChild = realloc(pChild, tableMetaSize);
pChild1 = realloc(pChild, tableMetaSize);
if(pChild1 == NULL)
return -1;
pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize;
}

View File

@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
if (pBuilder->pColIdx == NULL) return -1;
SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
if (pColIdx == NULL) return -1;
pBuilder->pColIdx = pColIdx;
}
pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2;
}
pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
if (pBuilder->buf == NULL) return -1;
void* buf = realloc(pBuilder->buf, pBuilder->alloc);
if (buf == NULL) return -1;
pBuilder->buf = buf;
}
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);

View File

@ -163,6 +163,7 @@ extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen;
// system info
extern char tsOsName[];

View File

@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
if (pBuilder->columns == NULL) return -1;
STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
if (columns == NULL) return -1;
pBuilder->columns = columns;
}
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);

View File

@ -84,7 +84,7 @@ int32_t tsCompressColData = -1;
// client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
@ -152,7 +152,6 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
@ -210,6 +209,7 @@ char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
SDiskCfg tsDiskCfg[1];
@ -570,7 +570,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -1238,6 +1237,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "topicBianryLen";
cfg.ptr = &tsTopicBianryLen;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 16;
cfg.maxValue = 16000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT8;

@ -1 +1 @@
Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4
Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f

@ -1 +1 @@
Subproject commit ce5201014136503d34fecbd56494b67b4961056c
Subproject commit b62a26ecc164a310104df57691691b237e091c89

View File

@ -49,7 +49,7 @@ def _load_taos():
try:
return load_func[platform.system()]()
except:
sys.exit("unsupported platform to TDengine connector")
raise InterfaceError('unsupported platform or failed to load taos client library')
_libtaos = _load_taos()

View File

@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
int32_t tbIndex = tbNum++;
if (tbMallocNum < tbNum) {
tbMallocNum = (tbMallocNum * 2 + 1);
tbNames = realloc(tbNames, tbMallocNum * sizeof(char *));
if (tbNames == NULL) {
char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
if (tbNames1 == NULL) {
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
break;
}
tbNames = tbNames1;
}
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);

View File

@ -254,8 +254,12 @@ int32_t shellRunCommand(TAOS* con, char* command) {
}
if (c == '\\') {
esc = true;
continue;
if (quote != 0 && (*command == '_' || *command == '\\')) {
//DO nothing
} else {
esc = true;
continue;
}
}
if (quote == c) {

File diff suppressed because it is too large Load Diff

View File

@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#if defined(WINDOWS)
int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3 for windows.\n");
@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
if ( ++fi == malloc_cnt ) {
malloc_cnt += 100000;
floats = realloc(floats, malloc_cnt*sizeof(float));
float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
if(floats1 == NULL)
break;
floats = floats1;
}
memset(buf, 0, sizeof(buf));
}
@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){
}
void unitTestFloat() {
float ft1 [] = {1.11, 2.22, 3.333};
@ -662,7 +665,50 @@ void unitTestFloat() {
free(ft2);
free(buff);
free(output);
}
void leakFloat() {
int cnt = sizeof(g_ft1)/sizeof(float);
float* floats = g_ft1;
int algorithm = 2;
// compress
const char* input = (const char*)floats;
int input_len = cnt * sizeof(float);
int output_len = input_len + 1024;
char* output = (char*) malloc(output_len);
char* buff = (char*) malloc(input_len);
int buff_len = input_len;
int ret_len = 0;
ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
if(ret_len == 0) {
printf(" compress float error.\n");
free(buff);
free(output);
return ;
}
float* ft2 = (float*)malloc(input_len);
ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
if(ret_len == 0) {
printf(" decompress float error.\n");
}
free(ft2);
free(buff);
free(output);
}
void leakTest(){
for(int i=0; i< 90000000000000; i++){
if(i%10000==0)
printf(" ---------- %d ---------------- \n", i);
leakFloat();
}
}
#define DB_CNT 500
@ -689,7 +735,7 @@ extern char Compressor [];
// ----------------- main ----------------------
//
int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3\n");
printf("welcome to use taospack tools v1.6\n");
//printf(" sizeof(int)=%d\n", (int)sizeof(int));
//printf(" sizeof(long)=%d\n", (int)sizeof(long));
@ -753,6 +799,9 @@ int main(int argc, char *argv[]) {
if(strcmp(argv[1], "-mem") == 0) {
memTest();
}
else if(strcmp(argv[1], "-leak") == 0) {
leakTest();
}
}
else{
unitTestFloat();

View File

@ -2934,10 +2934,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2;
}
pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta == NULL) {
SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta1 == NULL) {
return NULL;
}
pMultiMeta = pMultiMeta1;
}
return pMultiMeta;

View File

@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
void * tptr = (void *)((char *)ptr - sizeof(size_t));
size_t tsize = size + sizeof(size_t);
tptr = realloc(tptr, tsize);
if (tptr == NULL) return NULL;
void* tptr1 = realloc(tptr, tsize);
if (tptr1 == NULL) return NULL;
tptr = tptr1;
*(size_t *)tptr = size;

View File

@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
*n += MIN_CHUNK;
nchars_avail = (int32_t)(*n + *lineptr - read_pos);
*lineptr = realloc(*lineptr, *n);
if (!*lineptr) {
char* lineptr1 = realloc(*lineptr, *n);
if (!lineptr1) {
errno = ENOMEM;
return -1;
}
*lineptr = lineptr1;
read_pos = *n - nchars_avail + *lineptr;
assert((*lineptr + *n) == (read_pos + nchars_avail));
}

View File

@ -150,6 +150,7 @@ typedef struct HttpContext {
char ipstr[22];
char user[TSDB_USER_LEN]; // parsed from auth token or login message
char pass[HTTP_PASSWORD_LEN];
char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
TAOS * taos;
void * ppContext;
HttpSession *session;

View File

@ -22,12 +22,12 @@
#include "httpResp.h"
#include "httpSql.h"
#define REST_ROOT_URL_POS 0
#define REST_ACTION_URL_POS 1
#define REST_USER_URL_POS 2
#define REST_PASS_URL_POS 3
#define REST_ROOT_URL_POS 0
#define REST_ACTION_URL_POS 1
#define REST_USER_USEDB_URL_POS 2
#define REST_PASS_URL_POS 3
void restInitHandle(HttpServer* pServer);
bool restProcessRequest(struct HttpContext* pContext);
#endif
#endif

View File

@ -62,11 +62,11 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser;
if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) {
if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) {
return false;
}
tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN);
tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN);
return true;
}
@ -107,6 +107,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
HttpSqlCmd* cmd = &(pContext->singleCmd);
cmd->nativSql = sql;
/* find if there is db_name in url */
pContext->db[0] = '\0';
HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS];
if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') &&
(sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' '))
{
snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str);
}
pContext->reqType = HTTP_REQTYPE_SINGLE_SQL;
if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) {
pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod;

View File

@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) {
&(pContext->taos));
httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
pContext->taos);
if (pContext->taos != NULL) {
STscObj *pObj = pContext->taos;
pObj->from = TAOS_REQ_FROM_HTTP;
}
} else {
httpExecCmd(pContext);
}

View File

@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
void tOrderDescDestroy(tOrderDescriptor *pDesc);
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
int32_t numOfRowsToWrite, int32_t srcCapacity);

View File

@ -24,10 +24,10 @@ extern "C" {
extern uint32_t qDebugFlag;
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0)
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0)
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0)
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0)
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0)

View File

@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
return;
}
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
} else if (type == TSDB_FILL_NULL) {
@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
} else if (type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
} else {
if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
if (type == TSDB_FILL_PREV) {
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY skey = GET_TS_DATA(pCtx, 0);
if (type == TSDB_FILL_PREV) {
if (skey > pCtx->startTs) {
if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
return;
}
if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
if (ekey > skey && ekey <= pCtx->startTs) {
if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
skey = ekey;
}
}
@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = skey;
char* val = NULL;
if (ekey < pCtx->startTs) {
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1);
if (ekey < pCtx->startTs) {
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
return;
}
@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
|| ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
return;
}
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1);
@ -3788,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly
if (pCtx->size > 0) {
// impose the timestamp check
TSKEY key = GET_TS_DATA(pCtx, 0);
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
TSKEY key;
char *pData;
int32_t typedData = 0;
if (ascQuery) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
key = pCtx->start.key;
if (key == INT64_MIN) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
pData = pCtx->start.ptr;
} else {
typedData = 1;
pData = (char *)&pCtx->start.val;
}
}
}
//if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
if (key == pCtx->startTs) {
char *pData = GET_INPUT_DATA(pCtx, 0);
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
if (typedData) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
} else {
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
}
SET_VAL(pCtx, 1, 1);
} else {
interp_function_impl(pCtx);

View File

@ -45,7 +45,7 @@
#define MULTI_KEY_DELIM "-"
#define HASH_CAPACITY_LIMIT 10000000
#define HASH_CAPACITY_LIMIT 10000000
#define TIME_WINDOW_COPY(_dst, _src) do {\
(_dst).skey = (_src).skey;\
@ -1327,6 +1327,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = curTs;
pCtx[k].end.val = v2;
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
if (prevRowIndex == -1) {
pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
} else {
pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
}
pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
}
}
} else if (functionId == TSDB_FUNC_TWA) {
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
@ -1596,6 +1606,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL;
int32_t forwardStep = 0;
int32_t ret = 0;
STimeWindow preWin = win;
while (1) {
// null data, failed to allocate more memory buffer
@ -1610,12 +1621,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
preWin = win;
int32_t prevEndPos = (forwardStep - 1) * step + startPos;
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
if (startPos < 0) {
if (win.skey <= pQueryAttr->window.ekey) {
if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
@ -1626,7 +1638,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
}
break;
@ -3570,7 +3582,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
int64_t tid = 0;
pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
@ -7156,14 +7168,14 @@ static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* p
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j);
if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) {
SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes};
taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
}
}
}
pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput);
pInfo->buf = calloc(1, pInfo->totalBytes);
return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false;
}
}
static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) {
char *p = pInfo->buf;
@ -7188,11 +7200,13 @@ static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBl
p += strlen(MULTI_KEY_DELIM);
}
}
static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes;
@ -7247,11 +7261,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pRes->info.rows += 1;
}
}
if (pRes->info.rows >= pInfo->threshold) {
break;
}
}
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
}

View File

@ -768,60 +768,6 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
free(buf);
}
// Sort every column of a column-major block by one key column.
// pCols:     numOfCols column buffers, each holding numOfRows fixed-width values
// pSchema:   per-column schema; only the .bytes width of each column is read here
// index:     which column is the sort key
// compareFn: comparator applied to (key value, original row index) records
// Strategy: copy the key column into a scratch array of (value, int32 row-id)
// records, qsort that, then permute every other column to the new row order.
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
  assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);

  int32_t bytes = pSchema[index].bytes;
  int32_t size = bytes + sizeof(int32_t);  // one record: key value + original row index

  // Build the (value, row-id) records for the key column.
  char* buf = calloc(1, size * numOfRows);
  for(int32_t i = 0; i < numOfRows; ++i) {
    char* dest = buf + size * i;
    memcpy(dest, ((char*)pCols[index]) + bytes * i, bytes);
    *(int32_t*)(dest+bytes) = i;  // remember where this row came from
  }

  qsort(buf, numOfRows, size, compareFn);

  int32_t prevLength = 0;  // current capacity of the scratch buffer p, in bytes per row
  char* p = NULL;

  for(int32_t i = 0; i < numOfCols; ++i) {
    int32_t bytes1 = pSchema[i].bytes;

    if (i == index) {
      // Key column: the sorted values sit at the front of each record.
      for(int32_t j = 0; j < numOfRows; ++j){
        char* src = buf + (j * size);
        char* dest = (char*) pCols[i] + (j * bytes1);
        memcpy(dest, src, bytes1);
      }
    } else {
      // make sure memory buffer is enough
      if (prevLength < bytes1) {
        char *tmp = realloc(p, bytes1 * numOfRows);
        assert(tmp);
        p = tmp;
        prevLength = bytes1;
      }

      // Permute this column through the scratch copy using the saved row ids.
      memcpy(p, pCols[i], bytes1 * numOfRows);
      for(int32_t j = 0; j < numOfRows; ++j){
        char* dest = (char*) pCols[i] + bytes1 * j;
        int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
        char* src = p + (newPos * bytes1);
        memcpy(dest, src, bytes1);
      }
    }
  }

  tfree(buf);
  tfree(p);
}
/*
* deep copy of sschema
*/
@ -1157,3 +1103,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
destroyColumnModel(pDesc->pColumnModel);
tfree(pDesc);
}
// Sort every column of a column-major block by one key column.
// pCols:     numOfCols column buffers, each holding numOfRows fixed-width values
// pSchema:   per-column schema; only the .bytes width of each column is read here
// index:     which column is the sort key
// compareFn: comparator applied to (key value, original row index) records
// Strategy: copy the key column into a scratch array of (value, int32 row-id)
// records, qsort that, then permute every other column to the new row order.
// On allocation failure the function returns early: columns processed so far
// keep the new order, the rest keep the old (previously this dereferenced a
// NULL calloc result / relied on assert(), which is compiled out by NDEBUG).
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
  assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);

  int32_t bytes = pSchema[index].bytes;
  int32_t size = bytes + sizeof(int32_t);  // one record: key value + original row index

  // Build the (value, row-id) records for the key column.
  char* buf = calloc(1, (size_t)size * numOfRows);
  if (buf == NULL) {
    return;  // out of memory: leave the block unsorted rather than crash
  }

  for(int32_t i = 0; i < numOfRows; ++i) {
    char* dest = buf + size * i;
    memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
    *(int32_t*)(dest+bytes) = i;  // remember where this row came from
  }

  qsort(buf, numOfRows, size, compareFn);

  int32_t prevLength = 0;  // current capacity of the scratch buffer p, in bytes per row
  char* p = NULL;

  for(int32_t i = 0; i < numOfCols; ++i) {
    int32_t bytes1 = pSchema[i].bytes;

    if (i == index) {
      // Key column: the sorted values sit at the front of each record.
      for(int32_t j = 0; j < numOfRows; ++j){
        char* src = buf + (j * size);
        char* dest = ((char*)pCols[i]) + (j * bytes1);
        memcpy(dest, src, bytes1);
      }
    } else {
      // make sure memory buffer is enough
      if (prevLength < bytes1) {
        char *tmp = realloc(p, (size_t)bytes1 * numOfRows);
        if (tmp == NULL) {
          // keep p valid on failure (never p = realloc(p, ...)), free and bail out
          tfree(buf);
          tfree(p);
          return;
        }
        p = tmp;
        prevLength = bytes1;
      }

      // Permute this column through the scratch copy using the saved row ids.
      memcpy(p, pCols[i], bytes1 * numOfRows);
      for(int32_t j = 0; j < numOfRows; ++j){
        char* dest = ((char*)pCols[i]) + bytes1 * j;
        int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
        char* src = p + (newPos * bytes1);
        memcpy(dest, src, bytes1);
      }
    }
  }

  tfree(buf);
  tfree(p);
}

View File

@ -698,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
// fill operator
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
op = OP_Fill;
taosArrayPush(plan, &op);
}

View File

@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
static void shrinkBuffer(STSList* ptsData) {
// shrink tmp buffer size if it consumes too many memory compared to the pre-defined size
if (ptsData->allocSize >= ptsData->threshold * 2) {
ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
ptsData->allocSize = MEM_BUF_SIZE;
char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
if(rawBuf) {
ptsData->rawBuf = rawBuf;
ptsData->allocSize = MEM_BUF_SIZE;
}
}
}

View File

@ -18,6 +18,9 @@
#define TSDB_FS_VERSION 0
// ================== TSDB global config
extern bool tsdbForceKeepFile;
// ================== CURRENT file header info
typedef struct {
uint32_t version; // Current file system version (relating to code)
@ -110,4 +113,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) {
return 0;
}
#endif /* _TD_TSDB_FS_H_ */
#endif /* _TD_TSDB_FS_H_ */

View File

@ -37,6 +37,7 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo);
// For backward compatibility
// ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0;

View File

@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int32_t numOfColsOfRow1 = 0;
if (pSchema1 == NULL) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1));
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
}
if(isRow1DataRow) {
numOfColsOfRow1 = schemaNCols(pSchema1);
@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
if(row2) {
isRow2DataRow = isDataRow(row2);
if (pSchema2 == NULL) {
pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2));
pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
}
if(isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2);
@ -3480,6 +3480,7 @@ void filterPrepare(void* expr, void* param) {
SArray *arr = (SArray *)(pCond->arr);
for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
char* p = taosArrayGetP(arr, i);
strtolower(varDataVal(p), varDataVal(p));
taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy));
}
} else {

View File

@ -22,10 +22,10 @@ extern "C" {
#include "os.h"
#define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2
#define TSDB_PATTERN_STRING_MAX_LEN 100
#define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2
#define TSDB_PATTERN_STRING_DEFAULT_LEN 100
#define FLT_COMPAR_TOL_FACTOR 4
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))

View File

@ -537,7 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
// wait for the refresh thread quit before destroying the cache object.
while(atomic_load_8(&pCacheObj->deleting) != 0) {
// But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 2 seconds.
for (int i = 0; i < 40&&atomic_load_8(&pCacheObj->deleting) != 0; i++) {
taosMsleep(50);
}

View File

@ -139,8 +139,8 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
@ -164,8 +164,8 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
}
if (FLT_EQUAL(p1, p2)) {
return 0;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
return FLT_GREATER(p1, p2) ? 1: -1;
}
int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
@ -175,7 +175,7 @@ int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
if (len1 != len2) {
return len1 > len2? 1:-1;
} else {
@ -224,33 +224,33 @@ int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
*/
int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) {
char c, c1;
int32_t i = 0;
int32_t j = 0;
while ((c = patterStr[i++]) != 0) {
if (c == pInfo->matchAll) { /* Match "*" */
while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) {
if (c == pInfo->matchOne && (j > size || str[j++] == 0)) {
// empty string, return not match
return TSDB_PATTERN_NOWILDCARDMATCH;
}
}
if (c == 0) {
return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */
}
char next[3] = {toupper(c), tolower(c), 0};
while (1) {
size_t n = strcspn(str, next);
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
@ -258,18 +258,19 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
c1 = str[j++];
if (j <= size) {
if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue;
}
}
return TSDB_PATTERN_NOMATCH;
}
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
}
@ -277,13 +278,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
wchar_t c, c1;
wchar_t matchOne = L'_'; // "_"
wchar_t matchAll = L'%'; // "%"
int32_t i = 0;
int32_t j = 0;
while ((c = patterStr[i++]) != 0) {
if (c == matchAll) { /* Match "%" */
while ((c = patterStr[i++]) == matchAll || c == matchOne) {
if (c == matchOne && (j > size || str[j++] == 0)) {
return TSDB_PATTERN_NOWILDCARDMATCH;
@ -292,33 +293,33 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
if (c == 0) {
return TSDB_PATTERN_MATCH;
}
wchar_t accept[3] = {towupper(c), towlower(c), 0};
while (1) {
size_t n = wcscspn(str, accept);
str += n;
if (str[0] == 0 || (n >= size)) {
break;
}
int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
if (ret != TSDB_PATTERN_NOMATCH) {
return ret;
}
}
return TSDB_PATTERN_NOWILDCARDMATCH;
}
c1 = str[j++];
if (j <= size) {
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
continue;
}
}
return TSDB_PATTERN_NOMATCH;
}
@ -358,12 +359,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
@ -410,10 +412,10 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
} else { /* normal relational comparFn */
comparFn = compareLenPrefixedStr;
}
break;
}
case TSDB_DATA_TYPE_NCHAR: {
if (optr == TSDB_RELATION_LIKE) {
comparFn = compareWStrPatternComp;
@ -434,13 +436,13 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
comparFn = compareInt32Val;
break;
}
return comparFn;
}
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
__compar_fn_t comparFn = NULL;
switch (keyType) {
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BOOL:
@ -484,7 +486,7 @@ __compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
break;
}
return comparFn;
}
@ -517,7 +519,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
default: { // todo refactor
tstr* t1 = (tstr*) f1;
tstr* t2 = (tstr*) f2;
if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1;
} else {

View File

@ -70,7 +70,7 @@ void doubleSkipListTest() {
}
void randKeyTest() {
SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT),
SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
false, getkey);
int32_t size = 200000;

View File

@ -540,7 +540,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version;
//wInfo("writeFp: %ld", offset);
// wInfo("writeFp: %ld", offset);
if (0 != walSMemRowCheck(pHead)) {
wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);

View File

@ -0,0 +1,429 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
// Limits for the epoll-based HTTP stress client below.
#define RECV_MAX_LINE 2048   // per-client receive buffer size
#define ITEM_MAX_LINE 128    // max length of one request-header line
#define REQ_MAX_LINE 2048    // max length of the SQL payload
#define REQ_CLI_COUNT 100    // number of concurrent client connections

// Lifecycle of one client connection, driven by the epoll loop in main().
typedef enum
{
    uninited,
    connecting,   // connect() returned EINPROGRESS; waiting for epoll to confirm
    connected,
    datasent      // full request written; waiting for the response
} conn_stat;

// Hand-rolled bool.  NOTE(review): this clashes with <stdbool.h>/C23 `bool`
// if that header is ever pulled in — confirm before adding includes.
typedef enum
{
    false,
    true
} bool;

typedef unsigned short u16_t;
typedef unsigned int u32_t;

// Per-connection state; one instance per client socket.
typedef struct
{
    int sockfd;                    // socket fd, or -1 once closed
    int index;                     // position in the ctx[] array, used in logs
    conn_stat state;
    size_t nsent;                  // bytes of the request already sent
    size_t nrecv;                  // bytes of the response received so far
    size_t nlen;                   // total request length to send
    bool error;                    // set once a non-200 status line is seen
    bool success;                  // set once a 200 status line is seen
    struct sockaddr_in serv_addr;  // server address filled by create_socket()
} socket_ctx;
/*
 * Switch sockfd to non-blocking mode by OR-ing O_NONBLOCK into its
 * file-status flags.  Returns fcntl()'s result: 0 on success, -1 on failure
 * (a diagnostic is printed in that case).
 */
int set_nonblocking(int sockfd)
{
    int ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);

    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
    }

    return ret;
}
/*
 * Create a non-blocking TCP socket aimed at ip:port, storing the fd and the
 * prepared server address in pctx.  Returns the new fd on success, -1 on any
 * failure (a diagnostic is printed; the fd may be left open in pctx).
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    /* fill in the peer address for the later connect() */
    memset(&pctx->serv_addr, 0, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    if (set_nonblocking(pctx->sockfd) == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket in an array of cnt contexts and mark it -1. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }

    for (int i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}
/*
 * After epoll reports readiness on a socket in the `connecting` state, query
 * SO_ERROR to learn whether the non-blocking connect actually succeeded.
 * Returns 0 when the connection is established; otherwise closes the fd,
 * marks it -1, and returns -1.  A NULL ctx is treated as "no error".
 */
int proc_pending_error(socket_ctx *ctx)
{
    if (ctx == NULL) {
        return 0;
    }

    int err = 0;
    socklen_t len = sizeof(int);
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;   /* getsockopt itself failed; report errno instead */
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);
        close(ctx->sockfd);
        ctx->sockfd = -1;
        return -1;
    }

    return 0;
}
/*
 * Assemble a POST request for the REST endpoint into req_buf (at most len
 * bytes): request line, Host header, a fixed Basic-auth header
 * (root:taosdata), Content-Type, Content-Length, then the SQL payload as the
 * body.  Silently does nothing if any argument is missing or empty.
 * Output is truncated (NUL-terminated) if it does not fit in len bytes.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* %zu: strlen() returns size_t; the previous %ld was undefined behavior
     * where long and size_t differ (e.g. 64-bit Windows). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with the epoll instance; data rides along in the event. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and user data) of an fd already in the epoll set. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/*
 * Remove sockfd from the epoll set.  A zeroed event struct is still passed
 * because kernels before 2.6.9 reject a NULL event pointer for EPOLL_CTL_DEL.
 */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev;

    ev.events = 0;
    ev.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress driver: opens REQ_CLI_COUNT non-blocking connections to the local
 * REST server and issues one "create database" statement per client, driving
 * every socket from a single edge-triggered epoll loop until each request has
 * completed or failed.  Progress and errors go to stdout; always returns 0.
 *
 * Fixes vs the original: recv() is now bounded by the remaining space in the
 * per-client buffer (it previously always passed RECV_MAX_LINE while writing
 * at offset nrecv, overflowing recv_buf on responses read in several chunks),
 * and a "socket ar"/"socket at" message typo is corrected.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd;
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url = "/rest/sql";
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* a peer reset during send() must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);

    /* prepare per-client state and pre-build each HTTP request */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;

        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket at %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd at %d to epoll\r\n", i);
            goto failed;
        }
    }

    /* kick off all non-blocking connects; count tracks live clients */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    /* event loop; NOTE: the zero timeout makes this an intentional busy-wait */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    /* hard socket error: drop the client */
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        /* readiness on a connecting socket: confirm the connect */
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }

                    /* edge-triggered: drain everything available */
                    for ( ;; ) {
                        /* bound the read by the space left in the buffer; passing
                         * RECV_MAX_LINE here overflowed recv_buf once nrecv > 0 */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            /* enough bytes to hold "HTTP/1.1 xxx"; classify once */
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;   /* status code offset */
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        /* writability on a connecting socket: confirm the connect */
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }

                    /* edge-triggered: push as much of the request as the kernel takes */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            /* whole request out: stop watching for writability */
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}

View File

@ -0,0 +1,433 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
// Limits for the epoll-based HTTP stress client in this file.
#define RECV_MAX_LINE 2048   // per-client receive buffer size
#define ITEM_MAX_LINE 128    // max length of one request-header line
#define REQ_MAX_LINE 2048    // max length of the SQL payload
#define REQ_CLI_COUNT 100    // number of concurrent client connections

// Lifecycle of one client connection, driven by the epoll loop.
typedef enum
{
    uninited,
    connecting,   // connect() returned EINPROGRESS; waiting for epoll to confirm
    connected,
    datasent      // full request written; waiting for the response
} conn_stat;

// Hand-rolled bool.  NOTE(review): this clashes with <stdbool.h>/C23 `bool`
// if that header is ever pulled in — confirm before adding includes.
typedef enum
{
    false,
    true
} bool;

typedef unsigned short u16_t;
typedef unsigned int u32_t;

// Per-connection state; one instance per client socket.
typedef struct
{
    int sockfd;                    // socket fd, or -1 once closed
    int index;                     // position in the ctx[] array, used in logs
    conn_stat state;
    size_t nsent;                  // bytes of the request already sent
    size_t nrecv;                  // bytes of the response received so far
    size_t nlen;                   // total request length to send
    bool error;                    // set once a non-200 status line is seen
    bool success;                  // set once a 200 status line is seen
    struct sockaddr_in serv_addr;  // server address filled by create_socket()
} socket_ctx;
/*
 * Switch sockfd to non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;
    /* check F_GETFL separately: OR'ing O_NONBLOCK into a failed (-1)
     * result would try to set every flag bit */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the new socket fd on success, -1 on failure.
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }
    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }
    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }
    if (set_nonblocking(pctx->sockfd) == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }
    return pctx->sockfd;
}
/*
 * Close every still-open socket in the first cnt entries of pctx
 * and mark each one closed (-1). NULL array is a no-op.
 */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int idx;
    if (pctx == NULL) {
        return;
    }
    for (idx = 0; idx < cnt; idx++) {
        if (pctx[idx].sockfd <= 0) {
            continue;
        }
        close(pctx[idx].sockfd);
        pctx[idx].sockfd = -1;
    }
}
/*
 * Inspect the deferred connect() result via SO_ERROR.
 * Returns 0 when the connection completed successfully; on failure prints
 * a message, closes the socket, marks it closed and returns -1.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int err = 0;
    socklen_t len = sizeof(err);
    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }
    if (err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Assemble a minimal HTTP/1.1 POST for the TDengine REST API: request line,
 * Host, hard-coded basic-auth header, content type/length, then the SQL
 * statement as the body. Writes at most len bytes (NUL-terminated) into
 * req_buf; silently does nothing on invalid arguments.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }
    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* %zu: strlen() returns size_t; %ld mismatches it on ILP32/LLP64 targets */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with epfd for the given event mask; returns epoll_ctl()'s
 * result: 0 on success, -1 on error. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask of an already registered sockfd; returns
 * epoll_ctl()'s result: 0 on success, -1 on error. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Remove sockfd from epfd. A zeroed event struct is passed for
 * compatibility with kernels that require a non-NULL pointer. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = { .events = 0, .data.ptr = NULL };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress test: REQ_CLI_COUNT non-blocking HTTP clients each POST one
 * "create table" statement to /rest/sql/db<i> on 127.0.0.1:6041, driven by
 * a single edge-triggered epoll loop. Always returns 0; progress and
 * errors go to stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* init so the cleanup path never closes a random fd */
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;
    /* a peer reset during send() must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);
    /* prepare one context and one pre-built HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }
    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }
    /* kick off all non-blocking connects; count tracks live clients */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);
    /* NOTE(review): timeout 0 makes this loop a busy poll; a small positive
     * timeout would avoid spinning a full core while waiting */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        /* bound the read by the space actually left, keeping one
                         * byte for the NUL so recv_buf stays printable with %s;
                         * passing RECV_MAX_LINE unconditionally (as before)
                         * overflows recv_buf once nrecv > 0 */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* check the HTTP status code once the status line is in:
                         * "HTTP/1.1 200" puts the code at offset 9 */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request fully sent: only wait for the response now */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }
failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,433 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
/* buffer sizes and client count for the stress test */
#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 2048
#define REQ_CLI_COUNT 100
/* lifecycle of one client connection */
typedef enum
{
    uninited,
    connecting,
    connected,
    datasent
} conn_stat;
/* minimal local bool (file does not include <stdbool.h>); false==0, true==1 */
typedef enum
{
    false,
    true
} bool;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
/* per-connection bookkeeping */
typedef struct
{
    int sockfd;                    /* open socket, or -1 when closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* bytes of the request sent so far */
    size_t nrecv;                  /* bytes of the response received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once a non-200 status line is seen */
    bool success;                  /* set once a 200 status line is seen */
    struct sockaddr_in serv_addr;  /* destination filled by create_socket() */
} socket_ctx;
/*
 * Switch sockfd to non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;
    /* check F_GETFL separately: OR'ing O_NONBLOCK into a failed (-1)
     * result would try to set every flag bit */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the new socket fd on success, -1 on failure.
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }
    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }
    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }
    if (set_nonblocking(pctx->sockfd) == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }
    return pctx->sockfd;
}
/*
 * Close every still-open socket in the first cnt entries of pctx
 * and mark each one closed (-1). NULL array is a no-op.
 */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int idx;
    if (pctx == NULL) {
        return;
    }
    for (idx = 0; idx < cnt; idx++) {
        if (pctx[idx].sockfd <= 0) {
            continue;
        }
        close(pctx[idx].sockfd);
        pctx[idx].sockfd = -1;
    }
}
/*
 * Inspect the deferred connect() result via SO_ERROR.
 * Returns 0 when the connection completed successfully; on failure prints
 * a message, closes the socket, marks it closed and returns -1.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int err = 0;
    socklen_t len = sizeof(err);
    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }
    if (err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Assemble a minimal HTTP/1.1 POST for the TDengine REST API: request line,
 * Host, hard-coded basic-auth header, content type/length, then the SQL
 * statement as the body. Writes at most len bytes (NUL-terminated) into
 * req_buf; silently does nothing on invalid arguments.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }
    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* %zu: strlen() returns size_t; %ld mismatches it on ILP32/LLP64 targets */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with epfd for the given event mask; returns epoll_ctl()'s
 * result: 0 on success, -1 on error. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask of an already registered sockfd; returns
 * epoll_ctl()'s result: 0 on success, -1 on error. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Remove sockfd from epfd. A zeroed event struct is passed for
 * compatibility with kernels that require a non-NULL pointer. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = { .events = 0, .data.ptr = NULL };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress test: REQ_CLI_COUNT non-blocking HTTP clients each POST one
 * "drop database" statement to /rest/sql/db<i> on 127.0.0.1:6041, driven by
 * a single edge-triggered epoll loop. Always returns 0; progress and
 * errors go to stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* init so the cleanup path never closes a random fd */
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;
    /* a peer reset during send() must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);
    /* prepare one context and one pre-built HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }
    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }
    /* kick off all non-blocking connects; count tracks live clients */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);
    /* NOTE(review): timeout 0 makes this loop a busy poll; a small positive
     * timeout would avoid spinning a full core while waiting */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        /* bound the read by the space actually left, keeping one
                         * byte for the NUL so recv_buf stays printable with %s;
                         * passing RECV_MAX_LINE unconditionally (as before)
                         * overflows recv_buf once nrecv > 0 */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* check the HTTP status code once the status line is in:
                         * "HTTP/1.1 200" puts the code at offset 9 */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request fully sent: only wait for the response now */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }
failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,455 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <errno.h>
#include <signal.h>
/* buffer sizes and client count for the stress test */
#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 4096
#define REQ_CLI_COUNT 100
/* lifecycle of one client connection */
typedef enum
{
    uninited,
    connecting,
    connected,
    datasent
} conn_stat;
/* minimal local bool (file does not include <stdbool.h>); false==0, true==1 */
typedef enum
{
    false,
    true
} bool;
/* per-connection bookkeeping */
typedef struct
{
    int sockfd;                    /* open socket, or -1 when closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* bytes of the request sent so far */
    size_t nrecv;                  /* bytes of the response received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once a non-200 status line is seen */
    bool success;                  /* set once a 200 status line is seen */
    struct sockaddr_in serv_addr;  /* destination filled by create_socket() */
} socket_ctx;
/*
 * Switch sockfd to non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;
    /* check F_GETFL separately: OR'ing O_NONBLOCK into a failed (-1)
     * result would try to set every flag bit */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the new socket fd on success, -1 on failure.
 */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }
    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }
    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }
    if (set_nonblocking(pctx->sockfd) == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }
    return pctx->sockfd;
}
/*
 * Close every still-open socket in the first cnt entries of pctx
 * and mark each one closed (-1). NULL array is a no-op.
 */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int idx;
    if (pctx == NULL) {
        return;
    }
    for (idx = 0; idx < cnt; idx++) {
        if (pctx[idx].sockfd <= 0) {
            continue;
        }
        close(pctx[idx].sockfd);
        pctx[idx].sockfd = -1;
    }
}
/*
 * Inspect the deferred connect() result via SO_ERROR.
 * Returns 0 when the connection completed successfully; on failure prints
 * a message, closes the socket, marks it closed and returns -1.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int err = 0;
    socklen_t len = sizeof(err);
    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }
    if (err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Assemble a minimal HTTP/1.1 POST for the TDengine REST API: request line,
 * Host, hard-coded basic-auth header, content type/length, then the SQL
 * statement as the body. Writes at most len bytes (NUL-terminated) into
 * req_buf; silently does nothing on invalid arguments.
 */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }
    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* %zu: strlen() returns size_t; %ld mismatches it on ILP32/LLP64 targets */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with epfd for the given event mask; returns epoll_ctl()'s
 * result: 0 on success, -1 on error. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask of an already registered sockfd; returns
 * epoll_ctl()'s result: 0 on success, -1 on error. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Remove sockfd from epfd. A zeroed event struct is passed for
 * compatibility with kernels that require a non-NULL pointer. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = { .events = 0, .data.ptr = NULL };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress test: REQ_CLI_COUNT non-blocking HTTP clients each POST one batched
 * "insert into tb<i> values ..." statement to /rest/sql/db<i> on
 * 127.0.0.1:6041, driven by a single edge-triggered epoll loop.
 * Always returns 0; progress and errors go to stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv, offset;
    /* must be initialized: the sql-building loop below can "goto failed"
     * before epoll_create() runs, and the cleanup path reads epfd */
    int epfd = -1;
    uint32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    uint16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    struct timeval now;
    int64_t start_time;
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;
    /* a peer reset during send() must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);
    gettimeofday(&now, NULL);
    /* widen before multiplying: tv_sec * 1000000 overflows a 32-bit time_t */
    start_time = (int64_t) now.tv_sec * 1000000 + now.tv_usec;
    /* prepare one context and one pre-built HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        offset = 0;
        ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
        if (ret <= 0) {
            printf("failed to snprintf for sql(prefix), index: %d\r\n ", i);
            goto failed;
        }
        offset += ret;
        /* append value tuples until the statement is nearly full */
        while (offset < REQ_MAX_LINE - 128) {
            ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
            if (ret <= 0) {
                printf("failed to snprintf for sql(values), index: %d\r\n ", i);
                goto failed;
            }
            offset += ret;
        }
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }
    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }
    /* kick off all non-blocking connects; count tracks live clients */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);
    /* NOTE(review): timeout 0 makes this loop a busy poll; a small positive
     * timeout would avoid spinning a full core while waiting */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        /* bound the read by the space actually left, keeping one
                         * byte for the NUL so recv_buf stays printable with %s;
                         * passing RECV_MAX_LINE unconditionally (as before)
                         * overflows recv_buf once nrecv > 0 */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* check the HTTP status code once the status line is in:
                         * "HTTP/1.1 200" puts the code at offset 9 */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request fully sent: only wait for the response now */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }
failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,432 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <errno.h>
#include <signal.h>
/* buffer sizes and client count for the stress test */
#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 4096
#define REQ_CLI_COUNT 100
/* lifecycle of one client connection */
typedef enum
{
    uninited,
    connecting,
    connected,
    datasent
} conn_stat;
/* minimal local bool (file does not include <stdbool.h>); false==0, true==1 */
typedef enum
{
    false,
    true
} bool;
/* per-connection bookkeeping */
typedef struct
{
    int sockfd;                    /* open socket, or -1 when closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* bytes of the request sent so far */
    size_t nrecv;                  /* bytes of the response received so far */
    size_t nlen;                   /* total length of the prepared request */
    bool error;                    /* set once a non-200 status line is seen */
    bool success;                  /* set once a 200 status line is seen */
    struct sockaddr_in serv_addr;  /* destination filled by create_socket() */
} socket_ctx;
/*
 * Switch sockfd to non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;
    /* check F_GETFL separately: OR'ing O_NONBLOCK into a failed (-1)
     * result would try to set every flag bit */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the new socket fd on success, -1 on failure.
 */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }
    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }
    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr) <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }
    if (set_nonblocking(pctx->sockfd) == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }
    return pctx->sockfd;
}
/*
 * Close every still-open socket in the first cnt entries of pctx
 * and mark each one closed (-1). NULL array is a no-op.
 */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int idx;
    if (pctx == NULL) {
        return;
    }
    for (idx = 0; idx < cnt; idx++) {
        if (pctx[idx].sockfd <= 0) {
            continue;
        }
        close(pctx[idx].sockfd);
        pctx[idx].sockfd = -1;
    }
}
/*
 * Inspect the deferred connect() result via SO_ERROR.
 * Returns 0 when the connection completed successfully; on failure prints
 * a message, closes the socket, marks it closed and returns -1.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int err = 0;
    socklen_t len = sizeof(err);
    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }
    if (err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Assemble a minimal HTTP/1.1 POST for the TDengine REST API: request line,
 * Host, hard-coded basic-auth header, content type/length, then the SQL
 * statement as the body. Writes at most len bytes (NUL-terminated) into
 * req_buf; silently does nothing on invalid arguments.
 */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }
    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* %zu: strlen() returns size_t; %ld mismatches it on ILP32/LLP64 targets */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with epfd for the given event mask; returns epoll_ctl()'s
 * result: 0 on success, -1 on error. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask of an already registered sockfd; returns
 * epoll_ctl()'s result: 0 on success, -1 on error. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = { .events = events, .data.ptr = data };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Remove sockfd from epfd. A zeroed event struct is passed for
 * compatibility with kernels that require a non-NULL pointer. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = { .events = 0, .data.ptr = NULL };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress test: REQ_CLI_COUNT non-blocking HTTP clients each POST one
 * "select count(*)" statement to /rest/sql/db<i> on 127.0.0.1:6041, driven
 * by a single edge-triggered epoll loop. Always returns 0; progress and
 * errors go to stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* init so the cleanup path never closes a random fd */
    uint32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    uint16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;
    /* a peer reset during send() must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);
    /* prepare one context and one pre-built HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }
    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }
    /* kick off all non-blocking connects; count tracks live clients */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        /* bound the read by the space actually left, keeping one
                         * byte for the NUL so recv_buf stays printable with %s;
                         * passing RECV_MAX_LINE unconditionally (as before)
                         * overflows recv_buf once nrecv > 0 */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* check the HTTP status code once the status line is in:
                         * "HTTP/1.1 200" puts the code at offset 9 */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request fully sent: only wait for the response now */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }
failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,430 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
/* Buffer sizing and concurrency limits for the REST stress client. */
#define RECV_MAX_LINE 2048   /* bytes of HTTP response kept per client */
#define ITEM_MAX_LINE 128    /* max length of a single request header line */
#define REQ_MAX_LINE 2048    /* max length of the SQL payload */
#define REQ_CLI_COUNT 100    /* number of concurrent client connections */

/* Lifecycle of one client connection as driven by the epoll loop. */
typedef enum
{
    uninited,
    connecting,   /* connect() returned EINPROGRESS; completion pending */
    connected,
    datasent      /* full request written; waiting for the response */
} conn_stat;

/* Hand-rolled boolean (false = 0, true = 1); file does not use <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;

typedef unsigned short u16_t;
typedef unsigned int u32_t;

/* Per-connection bookkeeping; a pointer to this is stored in epoll user data. */
typedef struct
{
    int sockfd;                     /* -1 once the socket is closed */
    int index;                      /* position in the ctx[] array */
    conn_stat state;
    size_t nsent;                   /* bytes of the request sent so far */
    size_t nrecv;                   /* bytes of the response received so far */
    size_t nlen;                    /* total request length */
    bool error;                     /* response status line was not 200 */
    bool success;                   /* response status line was 200 */
    struct sockaddr_in serv_addr;
} socket_ctx;
/*
 * Switch sockfd into non-blocking mode.
 * Returns the fcntl() result: >= 0 on success, -1 on failure.
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;

    /* BUGFIX: check F_GETFL before OR'ing — on failure it returns -1,
     * and the original would have set a garbage flag mask. */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket aimed at ip:port and record it in pctx
 * (descriptor in pctx->sockfd, resolved address in pctx->serv_addr).
 * Returns the socket descriptor on success, -1 on failure.  On failure any
 * descriptor already created is closed and pctx->sockfd reset to -1, so the
 * caller no longer has to clean up half-initialized contexts.
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        /* BUGFIX: do not leak the descriptor on the error path */
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open descriptor among the first cnt contexts and mark
 * each slot invalid (-1).  A NULL array is tolerated and ignored. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    int idx;

    if (pctx == NULL) {
        return;
    }
    for (idx = 0; idx < cnt; idx++) {
        if (pctx[idx].sockfd <= 0) {
            continue;
        }
        close(pctx[idx].sockfd);
        pctx[idx].sockfd = -1;
    }
}
/*
 * After an asynchronous connect(), read SO_ERROR to learn whether it
 * actually succeeded.  Returns 0 when the connection is good (or ctx is
 * NULL), -1 when it failed — in which case the socket is closed and
 * ctx->sockfd invalidated.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int       err = 0;
    socklen_t errlen = sizeof(err);

    if (ctx == NULL) {
        return 0;
    }

    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &errlen) == -1) {
        /* getsockopt itself failed; treat errno as the pending error */
        err = errno;
    }
    if (err == 0) {
        return 0;
    }

    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Compose a minimal HTTP/1.1 POST request carrying `sql` as the body into
 * req_buf (capacity len).  Silently returns on any invalid argument, leaving
 * req_buf untouched.  The request targets the TDengine REST endpoint and
 * carries the default root:taosdata credentials.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    /* base64("root:taosdata") — TDengine's default REST credentials */
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* BUGFIX: strlen() returns size_t; the original "%ld" is undefined
     * behavior where size_t is not long (e.g. 64-bit Windows). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with the epoll instance for the given event mask,
 * attaching `data` as the per-fd user pointer.  Returns epoll_ctl's result. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and user pointer) of an fd already registered
 * with the epoll instance.  Returns epoll_ctl's result. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Remove sockfd from the epoll instance.  A zeroed event struct is passed
 * for portability with pre-2.6.9 kernels that reject a NULL argument. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev;

    ev.events = 0;
    ev.data.ptr = NULL;
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
int main()
{
int i;
int ret, n, nsent, nrecv;
int epfd;
u32_t events;
char *str;
socket_ctx *pctx, ctx[REQ_CLI_COUNT];
char *ip = "127.0.0.1";
char *url = "/rest/sql";
u16_t port = 6041;
struct epoll_event evs[REQ_CLI_COUNT];
char sql[REQ_MAX_LINE];
char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
int count;
signal(SIGPIPE, SIG_IGN);
for (i = 0; i < REQ_CLI_COUNT; i++) {
ctx[i].sockfd = -1;
ctx[i].index = i;
ctx[i].state = uninited;
ctx[i].nsent = 0;
ctx[i].nrecv = 0;
ctx[i].error = false;
ctx[i].success = false;
memset(sql, 0, REQ_MAX_LINE);
memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
memset(recv_buf[i], 0, RECV_MAX_LINE);
snprintf(sql, REQ_MAX_LINE, "use db%d", i);
build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
ctx[i].nlen = strlen(send_buf[i]);
}
epfd = epoll_create(REQ_CLI_COUNT);
if (epfd <= 0) {
printf("failed to create epoll\r\n");
goto failed;
}
for (i = 0; i < REQ_CLI_COUNT; i++) {
ret = create_socket(ip, port, &ctx[i]);
if (ret == -1) {
printf("failed to create socket, index: %d\r\n", i);
goto failed;
}
}
for (i = 0; i < REQ_CLI_COUNT; i++) {
events = EPOLLET | EPOLLIN | EPOLLOUT;
ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
if (ret == -1) {
printf("failed to add sockfd to epoll, index: %d\r\n", i);
goto failed;
}
}
count = 0;
for (i = 0; i < REQ_CLI_COUNT; i++) {
ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
if (ret == -1) {
if (errno != EINPROGRESS) {
printf("connect error, index: %d\r\n", ctx[i].index);
(void) del_event(epfd, ctx[i].sockfd);
close(ctx[i].sockfd);
ctx[i].sockfd = -1;
} else {
ctx[i].state = connecting;
count++;
}
continue;
}
ctx[i].state = connected;
count++;
}
printf("clients: %d\r\n", count);
while (count > 0) {
n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
if (n == -1) {
if (errno != EINTR) {
printf("epoll_wait error, reason: %s\r\n", strerror(errno));
break;
}
} else {
for (i = 0; i < n; i++) {
if (evs[i].events & EPOLLERR) {
pctx = (socket_ctx *) evs[i].data.ptr;
printf("event error, index: %d\r\n", pctx->index);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
} else if (evs[i].events & EPOLLIN) {
pctx = (socket_ctx *) evs[i].data.ptr;
if (pctx->state == connecting) {
ret = proc_pending_error(pctx);
if (ret == 0) {
printf("client connected, index: %d\r\n", pctx->index);
pctx->state = connected;
} else {
printf("client connect failed, index: %d\r\n", pctx->index);
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
continue;
}
}
for ( ;; ) {
nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
if (nrecv == -1) {
if (errno != EAGAIN && errno != EINTR) {
printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
}
break;
} else if (nrecv == 0) {
printf("peer closed connection, index: %d\r\n", pctx->index);
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
break;
}
pctx->nrecv += nrecv;
if (pctx->nrecv > 12) {
if (pctx->error == false && pctx->success == false) {
str = recv_buf[pctx->index] + 9;
if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
pctx->error = true;
} else {
printf("response ok, index: %d\r\n", pctx->index);
pctx->success = true;
}
}
}
}
} else if (evs[i].events & EPOLLOUT) {
pctx = (socket_ctx *) evs[i].data.ptr;
if (pctx->state == connecting) {
ret = proc_pending_error(pctx);
if (ret == 0) {
printf("client connected, index: %d\r\n", pctx->index);
pctx->state = connected;
} else {
printf("client connect failed, index: %d\r\n", pctx->index);
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
continue;
}
}
for ( ;; ) {
nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
if (nsent == -1) {
if (errno != EAGAIN && errno != EINTR) {
printf("failed to send, index: %d\r\n", pctx->index);
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
}
break;
}
if (nsent == (int) (pctx->nlen - pctx->nsent)) {
printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
pctx->state = datasent;
events = EPOLLET | EPOLLIN;
(void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
break;
} else {
pctx->nsent += nsent;
}
}
} else {
pctx = (socket_ctx *) evs[i].data.ptr;
printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
(void) del_event(epfd, pctx->sockfd);
close(pctx->sockfd);
pctx->sockfd = -1;
count--;
}
}
}
}
failed:
if (epfd > 0) {
close(epfd);
}
close_sockets(ctx, REQ_CLI_COUNT);
return 0;
}

View File

@ -102,6 +102,20 @@ class TDTestCase:
print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def alter_table_255_times(self): # add case for TD-6207
for i in range(255):
tdLog.info("alter table st add column cb%d int"%i)
tdSql.execute("alter table st add column cb%d int"%i)
tdSql.execute("insert into t0 (ts,c1) values(now,1)")
tdSql.execute("reset query cache")
tdSql.query("select * from st")
tdSql.execute("create table mt(ts timestamp, i int)")
tdSql.execute("insert into mt values(now,11)")
tdSql.query("select * from mt")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("describe db.st")
def run(self):
# Setup params
db = "db"
@ -131,12 +145,14 @@ class TDTestCase:
tdSql.checkData(0, i, self.rowNum * (size - i))
tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)")
tdSql.execute("create table t0 using st tags(null)")
tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)")
tdSql.execute("create table t0 using st tags(null,1,2.3)")
tdSql.execute("alter table t0 set tag t1=2.1")
tdSql.query("show tables")
tdSql.checkRows(2)
self.alter_table_255_times()
def stop(self):
tdSql.close()

View File

@ -175,12 +175,62 @@ class ConcurrentInquiry:
def con_group(self,tlist,col_list,tag_list):
rand_tag = random.randint(0,5)
rand_col = random.randint(0,1)
return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
if len(tag_list):
return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
else:
return 'group by '+','.join(random.sample(col_list,rand_col))
def con_order(self,tlist,col_list,tag_list):
return 'order by '+random.choice(tlist)
def gen_query_sql(self): #生成查询语句
def gen_subquery_sql(self):
    """Build a random SELECT whose FROM clause is a query produced by
    gen_query_sql(), i.e. a one-level subquery.

    Returns the SQL text, or the sentinel 0 when the inner query exposes no
    columns (callers must check for 0 before executing).
    """
    # passing 1 forces the inner query to alias its output columns
    subsql ,col_num = self.gen_query_sql(1)
    if col_num == 0:
        return 0
    col_list=[]
    tag_list=[]
    # NOTE(review): outer column refs are 'taosd%d' while the inner aliases
    # are 'sub%d' — looks like deliberate invalid-reference fuzzing, but
    # confirm that mismatch is intended.
    for i in range(col_num):
        col_list.append("taosd%d"%i)
    tlist=col_list+['abc']   # include nonexistent column 'abc' to probe error handling
    con_rand=random.randint(0,len(condition_list))
    func_rand=random.randint(0,len(func_list))
    col_rand=random.randint(0,len(col_list))
    t_rand=random.randint(0,len(tlist))
    sql='select '   # select
    random.shuffle(col_list)
    random.shuffle(func_list)
    sel_col_list=[]
    col_rand=random.randint(0,len(col_list))
    loop = 0
    # pick an aggregate/selector function for each selected column
    for i,j in zip(col_list[0:col_rand],func_list):
        alias = ' as '+ 'sub%d ' % loop
        loop += 1
        pick_func = ''
        if j == 'leastsquares':
            pick_func=j+'('+i+',1,1)'
        elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
            pick_func=j+'('+i+',1)'
        else:
            pick_func=j+'('+i+')'
        if bool(random.getrandbits(1)) :
            pick_func+=alias   # alias roughly half the projections
        sel_col_list.append(pick_func)
    if col_rand == 0:
        sql = sql + '*'
    else:
        sql=sql+','.join(sel_col_list)   # select col & func
    sql = sql + ' from ('+ subsql +') '
    con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill]
    sel_con=random.sample(con_func,random.randint(0,len(con_func)))
    sel_con_list=[]
    for i in sel_con:
        sel_con_list.append(i(tlist,col_list,tag_list))   # render each chosen clause
    sql+=' '.join(sel_con_list)   # condition
    #print(sql)
    return sql
def gen_query_sql(self,subquery=0): #生成查询语句
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
tbname=''
col_list=[]
@ -218,10 +268,10 @@ class ConcurrentInquiry:
pick_func=j+'('+i+',1)'
else:
pick_func=j+'('+i+')'
if bool(random.getrandbits(1)):
if bool(random.getrandbits(1)) | subquery :
pick_func+=alias
sel_col_list.append(pick_func)
if col_rand == 0:
if col_rand == 0 & subquery :
sql = sql + '*'
else:
sql=sql+','.join(sel_col_list) #select col & func
@ -238,7 +288,7 @@ class ConcurrentInquiry:
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
sql+=' '.join(sel_con_list) # condition
#print(sql)
return sql
return (sql,loop)
def gen_query_join(self): #生成join查询语句
tbname = []
@ -429,9 +479,12 @@ class ConcurrentInquiry:
try:
if self.random_pick():
sql=self.gen_query_sql()
if self.random_pick():
sql,temp=self.gen_query_sql()
else:
sql = self.gen_subquery_sql()
else:
sql=self.gen_query_join()
sql = self.gen_query_join()
print("sql is ",sql)
fo.write(sql+'\n')
start = time.time()
@ -496,9 +549,12 @@ class ConcurrentInquiry:
while loop:
try:
if self.random_pick():
sql=self.gen_query_sql()
if self.random_pick():
sql,temp=self.gen_query_sql()
else:
sql = self.gen_subquery_sql()
else:
sql=self.gen_query_join()
sql = self.gen_query_join()
print("sql is ",sql)
fo.write(sql+'\n')
start = time.time()

View File

@ -80,6 +80,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py
python3 ./test.py -f tag_lite/timestamp.py
python3 ./test.py -f tag_lite/TestModifyTag.py
#python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 test.py -f dbmgmt/nanoSecondCheck.py
@ -381,7 +382,9 @@ python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f query/queryWildcardLength.py
python3 ./test.py -f query/queryTbnameUpperLower.py
#======================p4-end===============

View File

@ -26,18 +26,70 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute("create table t(ts timestamp, k int)")
tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 12)
tdSql.execute("create table ap1 (ts timestamp, pav float)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)")
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)")
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
tdSql.checkRows(6)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)")
tdSql.checkRows(6)
tdSql.checkData(0,1,2.90799)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)")
tdSql.checkRows(7)
tdSql.checkData(1,1,1.47885)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)")
tdSql.checkRows(7)
# check desc order
tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc")
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc")
tdSql.checkRows(0)
tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
tdSql.checkRows(6)
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc")
tdSql.checkRows(6)
tdSql.checkData(0,1,4.60900)
tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc")
tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc")
tdSql.checkRows(7)
# check exception
tdSql.error("select interp(*) from ap1")
tdSql.error("select interp(*) from ap1 FILL(NEXT)")
tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)")
tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)")
tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)")
tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -13,6 +13,8 @@
import sys
import subprocess
import random
import math
from util.log import *
from util.cases import *
@ -56,7 +58,7 @@ class TDTestCase:
def td3690(self):
tdLog.printNoPrefix("==========TD-3690==========")
tdSql.query("show variables")
tdSql.checkData(51, 1, 864000)
tdSql.checkData(53, 1, 864000)
def td4082(self):
tdLog.printNoPrefix("==========TD-4082==========")
@ -106,6 +108,9 @@ class TDTestCase:
tdSql.execute("drop database if exists db1")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("create database if not exists db1 keep 3650")
tdSql.execute("create database if not exists new keep 3650")
tdSql.execute("create database if not exists private keep 3650")
tdSql.execute("create database if not exists db2 keep 3650")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
@ -122,6 +127,14 @@ class TDTestCase:
# p1 不进入指定数据库
tdSql.query("show create database db")
tdSql.checkRows(1)
tdSql.query("show create database db1")
tdSql.checkRows(1)
tdSql.query("show create database db2")
tdSql.checkRows(1)
tdSql.query("show create database new")
tdSql.checkRows(1)
tdSql.query("show create database private")
tdSql.checkRows(1)
tdSql.error("show create database ")
tdSql.error("show create databases db ")
tdSql.error("show create database db.stb1")
@ -255,7 +268,7 @@ class TDTestCase:
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.query("show variables")
tdSql.checkData(36, 1, 3650)
tdSql.checkData(38, 1, 3650)
tdSql.query("show databases")
tdSql.checkData(0,7,"3650,3650,3650")
@ -283,7 +296,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkData(0, 7, "3650,3650,3650")
tdSql.query("show variables")
tdSql.checkData(36, 1, 3650)
tdSql.checkData(38, 1, 3650)
tdSql.execute("alter database db1 keep 365")
tdSql.execute("drop database if exists db1")
@ -340,17 +353,552 @@ class TDTestCase:
pass
def td4889(self):
    """Regression test for TD-4889: after `compact vnodes`, column 6 of
    `show vgroups` should flip from 0 (idle) to 1 (compacting)."""
    tdLog.printNoPrefix("==========TD-4889==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")
    tdSql.execute("use db")
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
    # 1000 child tables x 100 rows: enough data to make compaction observable
    for i in range(1000):
        tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
        for j in range(100):
            tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
    tdSql.query("show vgroups")
    index = tdSql.getData(0,0)
    tdSql.checkData(0, 6, 0)   # compacting flag must be clear before the command
    tdSql.execute(f"compact vnodes in({index})")
    # poll briefly for the compacting flag to appear
    for i in range(3):
        tdSql.query("show vgroups")
        if tdSql.getData(0, 6) == 1:
            tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
            break
        if i == 3:
            # NOTE(review): dead branch — i never reaches 3 inside range(3),
            # so this failure exit can never fire; verify the intended bound.
            tdLog.exit("compacting not occured")
        time.sleep(0.5)
    pass
def td5168insert(self):
    """Populate db.t1 (two float + two double columns) with 5 deterministic
    baseline rows plus 1,000,000 random rows in [1000, 1001] — the data set
    consumed by the lossy-compression checks in td5168()."""
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")
    tdSql.execute("use db")
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
    tdSql.execute("create table db.t1 using db.stb1 tags(1)")
    # five rows at fixed timestamps; td5168 reads these back to verify the
    # stored values survive each lossyColumns configuration
    for i in range(5):
        c1 = 1001.11 + i*0.1
        c2 = 1001.11 + i*0.1 + 1*0.01
        c3 = 1001.11 + i*0.1 + 2*0.01
        c4 = 1001.11 + i*0.1 + 3*0.01
        tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
    # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
    # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
    # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
    # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
    # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
    # for i in range(1000000):
    # bulk rows inside a narrow value range so lossy encoding can shrink them
    for i in range(1000000):
        random1 = random.uniform(1000,1001)
        random2 = random.uniform(1000,1001)
        random3 = random.uniform(1000,1001)
        random4 = random.uniform(1000,1001)
        tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
    pass
def td5168(self):
    """Regression test for TD-5168 (lossyColumns compression).

    For each lossyColumns setting (off, float, double, float|double) it
    re-inserts the same data set, verifies the five baseline rows still read
    back with their captured values, and records the on-disk vnode size; the
    test passes only if every lossy configuration is smaller than the
    uncompressed baseline.
    """
    tdLog.printNoPrefix("==========TD-5168==========")
    # insert random values within a small range
    tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
    self.td5168insert()
    # capture the five baseline rows; without compression they must match exactly
    for i in range(5):
        tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
        # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
        for j in range(4):
            # NOTE(review): writing through locals() is CPython-dependent and
            # fragile; it appears to work here but consider a dict instead.
            locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
            print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
            tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
    # tdSql.query("select * from db.t1 limit 100,1")
    # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
    #
    # tdSql.query("select * from db.t1 limit 1000,1")
    # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
    #
    # tdSql.query("select * from db.t1 limit 10000,1")
    # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
    #
    # tdSql.query("select * from db.t1 limit 100000,1")
    # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
    #
    # tdSql.query("select * from db.t1 limit 1000000,1")
    # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
    # stop the server and measure the on-disk size with compression disabled
    tdSql.query("show dnodes")
    index = tdSql.getData(0, 0)
    tdDnodes.stop(index)
    cfgdir = self.getCfgDir()
    cfgfile = self.getCfgFile()
    lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
    data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
    dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
    lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
    tdLog.printNoPrefix(f"close the lossyColumnsdata size is: {dsize_init};the lossyColumns line is: {lossy_args}")
    ###################################################
    float_lossy = "float"
    double_lossy = "double"
    float_double_lossy = "float|double"
    no_loosy = ""
    double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
    _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
    # each command replaces the last cfg line with the next lossyColumns value
    lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
    lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
    lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
    lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
    ###################################################
    # enable lossy compression for float, restart, and re-insert the data
    tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
    lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
    tdDnodes.start(index)
    self.td5168insert()
    # compare the five baseline timestamps against the captured values
    for i in range(5):
        tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
        # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
        for j in range(4):
            # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
            # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
            tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
    # stop the server and measure the data size with lossyColumns=float
    tdDnodes.stop(index)
    dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
    lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
    tdLog.printNoPrefix(f"open the lossyColumns data size is{dsize_float};the lossyColumns line is: {lossy_args}")
    # switch lossy compression to double and restart
    tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
    lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
    tdDnodes.start(index)
    self.td5168insert()
    # compare the five baseline timestamps against the captured values
    for i in range(5):
        tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
        for j in range(4):
            tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
    # stop the server and measure the data size with lossyColumns=double
    tdDnodes.stop(index)
    dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
    lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
    tdLog.printNoPrefix(f"open the lossyColumns, data size is{dsize_double};the lossyColumns line is: {lossy_args}")
    # switch lossy compression to float|double and restart
    tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
    lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
    tdDnodes.start(index)
    self.td5168insert()
    # compare the five baseline timestamps against the captured values
    for i in range(5):
        tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
        for j in range(4):
            tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
    # stop the server and measure the data size with lossyColumns=float|double
    tdDnodes.stop(index)
    dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
    lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
    tdLog.printNoPrefix(f"open the lossyColumns data size is{dsize_float_double};the lossyColumns line is: {lossy_args}")
    # every lossy configuration must be smaller than the uncompressed baseline
    if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
        tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
        tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
        tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
        tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
        tdLog.exit("压缩未生效")
    else:
        tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
        tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
        tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
        tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
        tdLog.printNoPrefix("压缩生效")
    pass
def td5433(self):
    """Regression test for TD-5433: DISTINCT over numeric and binary tag
    columns must give the same row counts whether the filter literal is
    written as a number or as a quoted string."""
    tdLog.printNoPrefix("==========TD-5433==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")
    tdSql.execute("use db")
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
    tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
    numtab=2000000
    # stb1: 2M child tables; t0 cycles through 0..127, t1 is unique per table
    for i in range(numtab):
        sql  = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
        tdSql.execute(sql)
        tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
        tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
        tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
    # stb2: five child tables with binary tags
    tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
    tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
    tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
    tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
    tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
    # int tag t1: string and numeric literals must filter identically
    tdSql.query("select distinct t1 from stb1 where t1 != '150'")
    tdSql.checkRows(numtab-1)
    tdSql.query("select distinct t1 from stb1 where t1 != 150")
    tdSql.checkRows(numtab-1)
    tdSql.query("select distinct t1 from stb1 where t1 = 150")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb1 where t1 = '150'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb1")
    tdSql.checkRows(numtab)
    # tinyint tag t0: only 128 distinct values (i % 128)
    tdSql.query("select distinct t0 from stb1 where t0 != '2'")
    tdSql.checkRows(127)
    tdSql.query("select distinct t0 from stb1 where t0 != 2")
    tdSql.checkRows(127)
    tdSql.query("select distinct t0 from stb1 where t0 = 2")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb1 where t0 = '2'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb1")
    tdSql.checkRows(128)
    # binary tags on stb2: same equivalence with 5 distinct values
    tdSql.query("select distinct t1 from stb2 where t1 != '200'")
    tdSql.checkRows(4)
    tdSql.query("select distinct t1 from stb2 where t1 != 200")
    tdSql.checkRows(4)
    tdSql.query("select distinct t1 from stb2 where t1 = 200")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb2 where t1 = '200'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb2")
    tdSql.checkRows(5)
    tdSql.query("select distinct t0 from stb2 where t0 != '2'")
    tdSql.checkRows(4)
    tdSql.query("select distinct t0 from stb2 where t0 != 2")
    tdSql.checkRows(4)
    tdSql.query("select distinct t0 from stb2 where t0 = 2")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb2 where t0 = '2'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb2")
    tdSql.checkRows(5)
    pass
def td5798(self):
# Regression test for TD-5798 (distinct over multiple TAG columns) and
# TD-5810 (distinct over multiple DATA columns) on super tables, child
# tables, subqueries and joins. Builds a fresh `db` with two stables and
# 100 child tables each, then checks row counts of many distinct queries
# and that unsupported combinations raise errors.
tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
# Fixture: tags cycle modulo maxRemainderNum so distinct row counts below
# can be expressed in terms of it; one extra all-null-tag table per stable.
maxRemainderNum=7
tbnum=101
for i in range(tbnum-1):
sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
tdSql.execute(sql)
tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
#========== TD-5810 support distinct multi-data-column ==========
tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum)
tdSql.query(f"select distinct c2 from stb1")
tdSql.checkRows(4)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum*3)
tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
tdSql.checkRows(2)
tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
tdSql.checkRows(1)
tdSql.query(f"select distinct c2 from t1")
tdSql.checkRows(4)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c1 from t1 ")
tdSql.checkRows(2)
tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
tdSql.checkRows(1)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
tdSql.checkRows(1)
tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
tdSql.checkRows(2)
# Invalid distinct-on-data-column forms must be rejected.
tdSql.error("select distinct c5 from stb1")
tdSql.error("select distinct c5 from t1")
tdSql.error("select distinct c1 from db.*")
tdSql.error("select c2, distinct c1 from stb1")
tdSql.error("select c2, distinct c1 from t1")
tdSql.error("select distinct c2 from ")
tdSql.error("distinct c2 from stb1")
tdSql.error("distinct c2 from t1")
tdSql.error("select distinct c1, c2, c3 from stb1")
tdSql.error("select distinct c1, c2, c3 from t1")
tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
tdSql.checkRows(tbnum*3)
tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
tdSql.checkRows(3)
tdSql.query("select distinct c1, c2 from stb1 order by ts")
tdSql.checkRows(tbnum*3+1)
tdSql.query("select distinct c1, c2 from t1 order by ts")
tdSql.checkRows(4)
tdSql.error("select distinct c1, ts from stb1 group by c2")
tdSql.error("select distinct c1, ts from t1 group by c2")
tdSql.error("select distinct c1, max(c2) from stb1 ")
tdSql.error("select distinct c1, max(c2) from t1 ")
tdSql.error("select max(c2), distinct c1 from stb1 ")
tdSql.error("select max(c2), distinct c1 from t1 ")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
tdSql.checkRows(6)
tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
tdSql.checkRows(15)
tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
tdSql.checkRows(3)
# Distinct on data columns over subqueries.
tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
tdSql.checkRows(3)
tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
tdSql.checkRows(4)
tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
tdSql.checkRows(3)
tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
# tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
# tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
# tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
tdSql.checkRows(1)
tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
tdSql.checkRows(1)
tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
# tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
# tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
#========== TD-5798 support distinct multi-tags-column ==========
tdSql.query("select distinct t1 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t1 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t1, t0 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t1, t2 from stb1")
tdSql.checkRows(maxRemainderNum*2+1)
tdSql.query("select distinct t0, t1, t2 from stb1")
tdSql.checkRows(maxRemainderNum*2+1)
tdSql.query("select distinct t0 t1, t1 t2 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t0, t0 from stb1")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t0, t1 from t1")
tdSql.checkRows(1)
tdSql.query("select distinct t0, t1 from t100num")
tdSql.checkRows(1)
tdSql.query("select distinct t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t2, t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t3, t2 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t4, t2 from stb2")
tdSql.checkRows(maxRemainderNum*3+1)
tdSql.query("select distinct t2, t3, t4 from stb2")
tdSql.checkRows(maxRemainderNum*3+1)
tdSql.query("select distinct t2 t1, t3 t2 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t3, t3, t3 from stb2")
tdSql.checkRows(maxRemainderNum+1)
tdSql.query("select distinct t2, t3 from t01")
tdSql.checkRows(1)
tdSql.query("select distinct t3, t4 from t0100num")
tdSql.checkRows(1)
########## should be error #########
tdSql.error("select distinct from stb1")
tdSql.error("select distinct t3 from stb1")
tdSql.error("select distinct t1 from db.*")
tdSql.error("select distinct t2 from ")
tdSql.error("distinct t2 from stb1")
tdSql.error("select distinct stb1")
tdSql.error("select distinct t0, t1, t2, t3 from stb1")
tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
tdSql.error("select dist t0 from stb1")
tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
########## add where condition ##########
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
tdSql.checkRows(3)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
tdSql.checkRows(2)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
tdSql.checkRows(1)
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
tdSql.checkRows(3)
tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
tdSql.checkRows(1)
tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
tdSql.checkRows(5)
# Distinct on tag columns over subqueries.
tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
tdSql.checkRows(4)
tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.checkRows(1)
tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.checkRows(1)
tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
pass
def td5935(self):
# Regression test for TD-5935: builds 100 child tables with 1000 rows
# each (10ms apart, plus one all-null row 10s back), then verifies two
# previously-broken behaviors: TD-5933 (last(*) with interval returned
# 0 rows) and TD-5978 (fill(next) produced 0 when the first row is null).
tdLog.printNoPrefix("==========TD-5935==========")
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("use db")
tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
nowtime=int(round((time.time()*1000)))
for i in range(100):
sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
tdSql.execute(sql)
for j in range(1000):
tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
tdSql.query(stddevAndIntervalSql)
tdSql.checkRows(10)
########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
tdSql.query(fillsql)
fillResult=False
# First interval starts at the all-null row: fill(next) must yield the
# next real value, so 0 or None here means the fill is still broken.
if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
fillResult=True
if fillResult:
tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
else:
tdLog.exit("fill(next) is wrong")
pass
def run(self):
# Test entry point: only td4724 and td5798 are currently enabled; the
# remaining cases are kept commented out so they can be re-enabled per
# branch (master vs develop) as needed.
# master branch
# self.td3690()
# self.td4082()
# self.td4288()
self.td4724()
# self.td4724()
self.td5798()
# self.td5935()
# develop branch
# self.td4097()
# self.td4889()
# self.td5168()
# self.td5433()
def stop(self):
tdSql.close()

View File

@ -28,7 +28,7 @@ class insertFromCSVPerformace:
self.tbName = tbName
self.branchName = branchName
self.type = buildType
self.ts = 1500074556514
self.ts = 1500000000000
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@ -46,13 +46,20 @@ class insertFromCSVPerformace:
config = self.config)
def writeCSV(self):
with open('test3.csv','w', encoding='utf-8', newline='') as csvFile:
tsset = set()
rows = 0
with open('test4.csv','w', encoding='utf-8', newline='') as csvFile:
writer = csv.writer(csvFile, dialect='excel')
for i in range(1000000):
newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000)
d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
while True:
newTimestamp = self.ts + random.randint(1, 10) * 10000000000 + random.randint(1, 10) * 1000000000 + random.randint(1, 10) * 100000000 + random.randint(1, 10) * 10000000 + random.randint(1, 10) * 1000000 + random.randint(1, 10) * 100000 + random.randint(1, 10) * 10000 + random.randint(1, 10) * 1000 + random.randint(1, 10) * 100 + random.randint(1, 10) * 10 + random.randint(1, 10)
if newTimestamp not in tsset:
tsset.add(newTimestamp)
d = datetime.datetime.fromtimestamp(newTimestamp / 1000)
dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f"))
writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)])
rows += 1
if rows == 2000000:
break
def removCSVHeader(self):
data = pd.read_csv("ordered.csv")
@ -71,7 +78,9 @@ class insertFromCSVPerformace:
cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
cursor.execute("insert into t1 file 'outoforder.csv'")
totalTime += time.time() - startTime
totalTime += time.time() - startTime
time.sleep(1)
out_of_order_time = (float) (totalTime / 10)
print("Out of Order - Insert time: %f" % out_of_order_time)
@ -81,7 +90,8 @@ class insertFromCSVPerformace:
cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
startTime = time.time()
cursor.execute("insert into t2 file 'ordered.csv'")
totalTime += time.time() - startTime
totalTime += time.time() - startTime
time.sleep(1)
in_order_time = (float) (totalTime / 10)
print("In order - Insert time: %f" % in_order_time)

View File

@ -29,7 +29,6 @@ class TDTestCase:
self.tables = 10
self.rowsPerTable = 100
def run(self):
# tdSql.execute("drop database db ")
tdSql.prepare()

View File

@ -65,6 +65,10 @@ class TDTestCase:
# TD-2208
tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001")
# TD-6006
tdSql.error("select * from dev_001 where 'name' is not null")
tdSql.error("select * from dev_001 where \"name\" = 'first'")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -17,6 +17,7 @@ import os
import taos
import time
import argparse
import json
class taosdemoQueryPerformace:
@ -48,7 +49,7 @@ class taosdemoQueryPerformace:
cursor2 = self.conn2.cursor()
cursor2.execute("create database if not exists %s" % self.dbName)
cursor2.execute("use %s" % self.dbName)
cursor2.execute("create table if not exists %s(ts timestamp, query_time float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
cursor2.execute("create table if not exists %s(ts timestamp, query_time_avg float, query_time_max float, query_time_min float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
sql = "select count(*) from test.meters"
tableid = 1
@ -74,7 +75,7 @@ class taosdemoQueryPerformace:
tableid = 6
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
sql = "select * from meters"
sql = "select * from meters limit 10000"
tableid = 7
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
@ -87,37 +88,96 @@ class taosdemoQueryPerformace:
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
cursor2.close()
def generateQueryJson(self):
# Build a taosdemo/perfMonitor query-configuration JSON file from the
# query SQLs registered in the perf stable, and return its path.
# NOTE(review): assumes self.conn2 is an open connection to the perf
# database and self.dbName/self.stbName are set — confirm in __init__.
sqls = []
cursor2 = self.conn2.cursor()
cursor2.execute("select query_id, query_sql from %s.%s" % (self.dbName, self.stbName))
i = 0
for data in cursor2:
# data[1] is the query SQL; each query writes its result to its own file.
sql = {
"sql": data[1],
"result_mode": "onlyformat",
"result_file": "./query_sql_res%d.txt" % i
}
sqls.append(sql)
i += 1
query_data = {
"filetype": "query",
"cfgdir": "/etc/perf",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "test",
"specified_table_query": {
"query_times": 100,
"concurrent": 1,
"sqls": sqls
}
}
# Write the config where the benchmark runner expects it.
query_json_file = f"/tmp/query.json"
with open(query_json_file, 'w') as f:
json.dump(query_data, f)
return query_json_file
def getBuildPath(self):
    """Locate the TDengine build root that contains the taosdemo binary.

    Walks the project tree upward from this file (handles both a
    'community' checkout and a plain 'tests' checkout) looking for a
    'taosdemo' file whose real path is not under 'packaging'.

    Returns:
        The build root (path with the trailing '/build/bin' stripped),
        or "" when taosdemo is not found — callers check for "".
    """
    selfPath = os.path.dirname(os.path.realpath(__file__))
    if ("community" in selfPath):
        projPath = selfPath[:selfPath.find("community")]
    else:
        projPath = selfPath[:selfPath.find("tests")]
    # Fix: initialize so a fruitless walk returns "" instead of raising
    # UnboundLocalError (callers already treat "" as "not found").
    buildPath = ""
    for root, dirs, files in os.walk(projPath):
        if ("taosdemo" in files):
            rootRealPath = os.path.dirname(os.path.realpath(root))
            if ("packaging" not in rootRealPath):
                buildPath = root[:len(root) - len("/build/bin")]
                break
    return buildPath
def getCMDOutput(self, cmd):
    """Run a shell command and return its captured standard output.

    Args:
        cmd: shell command line, executed via os.popen.

    Returns:
        The command's stdout as a string ("" if it produced no output).
    """
    # Use a distinct name for the pipe object so the `cmd` argument is
    # not shadowed, and close the pipe even if read() raises.
    pipe = os.popen(cmd)
    try:
        return pipe.read()
    finally:
        pipe.close()
def query(self):
cursor = self.conn.cursor()
buildPath = self.getBuildPath()
if (buildPath == ""):
print("taosdemo not found!")
sys.exit(1)
binPath = buildPath + "/build/bin/"
os.system(
"%sperfMonitor -f %s > query_res.txt" %
(binPath, self.generateQueryJson()))
cursor = self.conn2.cursor()
print("==================== query performance ====================")
cursor.execute("use %s" % self.dbName)
cursor.execute("select tbname, query_id, query_sql from %s" % self.stbName)
cursor.execute("select tbname, query_sql from %s" % self.stbName)
i = 0
for data in cursor:
table_name = data[0]
query_id = data[1]
sql = data[2]
totalTime = 0
cursor2 = self.conn.cursor()
cursor2.execute("use test")
for i in range(100):
if(self.clearCache == True):
# root permission is required
os.system("echo 3 > /proc/sys/vm/drop_caches")
startTime = time.time()
cursor2.execute(sql)
totalTime += time.time() - startTime
cursor2.close()
print("query time for: %s %f seconds" % (sql, totalTime / 100))
cursor3 = self.conn2.cursor()
cursor3.execute("insert into %s.%s values(now, %f, '%s', '%s', '%s')" % (self.dbName, table_name, totalTime / 100, self.commitID, self.branch, self.type))
sql = data[1]
cursor3.close()
self.avgDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $2}'" % (i + 1))
self.maxDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $5}'" % (i + 1))
self.minDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $8}'" % (i + 1))
i += 1
print("query time for: %s %f seconds" % (sql, float(self.avgDelay)))
c = self.conn2.cursor()
c.execute("insert into %s.%s values(now, %f, %f, %f, '%s', '%s', '%s')" % (self.dbName, table_name, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.commitID, self.branch, self.type))
c.close()
cursor.close()
if __name__ == '__main__':
@ -174,4 +234,4 @@ if __name__ == '__main__':
args = parser.parse_args()
perftest = taosdemoQueryPerformace(args.remove_cache, args.commit_id, args.database_name, args.stable_name, args.table_perfix, args.git_branch, args.build_type)
perftest.createPerfTables()
perftest.query()
perftest.query()

View File

@ -0,0 +1,78 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.common import tdCom
class TDTestCase:
# Test case: `where tbname in (...)` must match table names
# case-insensitively against child tables created with lower-case,
# upper-case and mixed-case names.
def init(self, conn, logSql):
# Standard test-framework hook: bind the connection cursor to tdSql.
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def checkStbWhereIn(self):
'''
where in ---> upper lower mixed
'''
tdCom.cleanTb()
# Random mixed-case base name; sub-tables reuse it in lower/upper case.
table_name = tdCom.getLongName(8, "letters_mixed")
table_name_sub = f'{table_name}_sub'
tb_name_lower = table_name_sub.lower()
tb_name_upper = table_name_sub.upper()
## create stb and tb
tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))')
tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")')
tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")')
tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")')
## insert values
# Row counts per table (1 / 2 / 3) let the checks below distinguish
# which table a tbname filter actually matched.
tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")')
tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")')
tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")')
tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")')
tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")')
tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")')
## query where tbname in single
tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")')
tdSql.checkRows(1)
tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")')
tdSql.checkRows(1)
tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")')
tdSql.checkRows(1)
tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")')
tdSql.checkRows(2)
tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")')
tdSql.checkRows(2)
tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")')
tdSql.checkRows(3)
tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")')
tdSql.checkRows(3)
## query where tbname in multi
tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")')
tdSql.checkRows(1)
tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")')
tdSql.checkRows(6)
def run(self):
# Entry point: prepare a clean database and run the check.
tdSql.prepare()
self.checkStbWhereIn()
def stop(self):
# Teardown: close the connection and report success.
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -53,6 +53,9 @@ class TDTestCase:
"select * from cars where id=0 and tbname in ('carzero', 'cartwo')")
tdSql.checkRows(1)
tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')")
tdSql.checkRows(2)
"""
tdSql.query("select * from cars where tbname like 'car%'")
tdSql.checkRows(2)

View File

@ -47,6 +47,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
# insert: create one or multiple tables per sql and insert multiple rows per sql
# insert data from a special timestamp
# check stable stb0
@ -89,6 +90,7 @@ class TDTestCase:
os.system(
"%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " %
binPath)
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)

View File

@ -49,24 +49,18 @@ class taosdemoPerformace:
def generateJson(self):
db = {
"name": "%s" % self.insertDB,
"drop": "yes",
"replica": 1
"drop": "yes"
}
stb = {
"name": "meters",
"child_table_exists": "no",
"childtable_count": self.numOfTables,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"batch_create_tbl_num": 10,
"insert_mode": "taosc",
"insert_mode": "rand",
"insert_rows": self.numOfRows,
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"batch_rows": 1000000,
"max_sql_len": 1048576,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
@ -100,11 +94,8 @@ class taosdemoPerformace:
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 30000,
"databases": [db]
}
@ -145,7 +136,7 @@ class taosdemoPerformace:
binPath = buildPath + "/build/bin/"
os.system(
"%staosdemo -f %s > /dev/null 2>&1" %
"%sperfMonitor -f %s > /dev/null 2>&1" %
(binPath, self.generateJson()))
self.createTableTime = self.getCMDOutput(
"grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'")

View File

@ -60,7 +60,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@ -320,7 +320,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)

View File

@ -58,7 +58,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@ -318,7 +318,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)

View File

@ -58,7 +58,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@ -318,7 +318,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)

View File

@ -14,15 +14,16 @@
import sys
import os
import os.path
import platform
import subprocess
from time import sleep
from util.log import *
class TDSimClient:
def __init__(self):
def __init__(self, path):
self.testCluster = False
self.path = path
self.cfgDict = {
"numOfLogLines": "100000000",
"numOfThreadsPerCore": "2.0",
@ -41,10 +42,7 @@ class TDSimClient:
"jnidebugFlag": "135",
"qdebugFlag": "135",
"telemetryReporting": "0",
}
def init(self, path):
self.__init__()
self.path = path
}
def getLogDir(self):
self.logDir = "%s/sim/psim/log" % (self.path)
@ -61,7 +59,7 @@ class TDSimClient:
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@ -246,7 +244,7 @@ class TDDnode:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
if (("taosd") in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
@ -404,7 +402,7 @@ class TDDnode:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@ -480,8 +478,7 @@ class TDDnodes:
for i in range(len(self.dnodes)):
self.dnodes[i].init(self.path)
self.sim = TDSimClient()
self.sim.init(self.path)
self.sim = TDSimClient(self.path)
def setTestCluster(self, value):
self.testCluster = value

View File

@ -313,6 +313,12 @@ if $rows != 6 then
return -1
endi
print =============================> TD-6086
sql create stable td6086st(ts timestamp, d double) tags(t nchar(50));
sql create table td6086ct1 using td6086st tags("ct1");
sql create table td6086ct2 using td6086st tags("ct2");
sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname;
print ==================> td-2624
sql create table tm2(ts timestamp, k int, b binary(12));
sql insert into tm2 values('2011-01-02 18:42:45.326', -1,'abc');
@ -1149,9 +1155,11 @@ endi
sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
sql create table smeters (ts timestamp, current float, voltage int);
sql insert into smeters values ('2021-08-08 10:10:10', 10, 1);
sql insert into smeters values ('2021-08-08 10:10:12', 10, 2);
sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int);
sql create table smeter1 using smeters tags (1);
sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2);
sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2);
sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1);
sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a);
if $rows != 2 then
@ -1160,9 +1168,21 @@ endi
if $data00 != @21-08-08 10:10:10.000@ then
return -1
endi
if $data01 != 0.000000000 then
return -1
endi
if $data10 != @21-08-08 10:10:12.000@ then
return -1
endi
if $data11 != 0.000000000 then
return -1
endi
sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10;
if $rows != 1 then
return -1
endi
if $data00 != 0.000000000 then
return -1
endi

View File

@ -68,7 +68,6 @@ print ================== server restart completed
run general/parser/interp_test.sim
print ================= TD-5931
sql create stable st5931(ts timestamp, f int) tags(t int)
sql create table ct5931 using st5931 tags(1)
@ -76,6 +75,7 @@ sql create table nt5931(ts timestamp, f int)
sql select interp(*) from nt5931 where ts=now
sql select interp(*) from st5931 where ts=now
sql select interp(*) from ct5931 where ts=now
if $rows != 0 then
return -1
endi

View File

@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then
endi
sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear);
if $rows != 8 then
return -1
endi
if $data00 != @18-09-17 20:35:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:37:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data51 != 0 then
return -1
endi
if $data60 != @18-09-17 20:41:00.000@ then
return -1
endi
if $data61 != NULL then
return -1
endi
if $data70 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data71 != NULL then
return -1
endi
sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc;
if $rows != 8 then
return -1
endi
if $data00 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:41:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data21 != 0 then
return -1
endi
if $data30 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:37:00.000@ then
return -1
endi
if $data51 != NULL then
return -1
endi
if $data60 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data61 != NULL then
return -1
endi
if $data70 != @18-09-17 20:35:00.000@ then
return -1
endi
if $data71 != NULL then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts;
if $rows != 9 then
return -1
endi
if $data00 != @18-09-17 20:34:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:38:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:40:00.000@ then
return -1
endi
if $data31 != 0.00000 then
return -1
endi
if $data40 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data41 != 0.20000 then
return -1
endi
if $data50 != @18-09-17 20:44:00.000@ then
return -1
endi
if $data51 != 0.40000 then
return -1
endi
if $data60 != @18-09-17 20:46:00.000@ then
return -1
endi
if $data61 != 0.60000 then
return -1
endi
if $data70 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data71 != 0.80000 then
return -1
endi
if $data80 != @18-09-17 20:50:00.000@ then
return -1
endi
if $data81 != 1.00000 then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts;
if $rows != 6 then
return -1
endi
if $data00 != @18-09-17 20:33:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data10 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data11 != NULL then
return -1
endi
if $data20 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data21 != NULL then
return -1
endi
if $data30 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data31 != 0.20000 then
return -1
endi
if $data40 != @18-09-17 20:45:00.000@ then
return -1
endi
if $data41 != 0.50000 then
return -1
endi
if $data50 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data51 != 0.80000 then
return -1
endi
sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc;
if $rows != 6 then
return -1
endi
if $data00 != @18-09-17 20:48:00.000@ then
return -1
endi
if $data01 != 0.80000 then
return -1
endi
if $data10 != @18-09-17 20:45:00.000@ then
return -1
endi
if $data11 != 0.50000 then
return -1
endi
if $data20 != @18-09-17 20:42:00.000@ then
return -1
endi
if $data21 != 0.20000 then
return -1
endi
if $data30 != @18-09-17 20:39:00.000@ then
return -1
endi
if $data31 != NULL then
return -1
endi
if $data40 != @18-09-17 20:36:00.000@ then
return -1
endi
if $data41 != NULL then
return -1
endi
if $data50 != @18-09-17 20:33:00.000@ then
return -1
endi
if $data51 != NULL then
return -1
endi

View File

@ -75,4 +75,9 @@ sleep 100
run general/parser/limit_tb.sim
run general/parser/limit_stb.sim
print ========> TD-6017
sql use $db
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1)
sql select * from (select ts, top(c1, 5) from $stb where ts >= $ts0 order by ts desc limit 3 offset 1)
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1
if $rows != 0 then
return -1
endi
print ========> TD-6017
sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1)
sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1
print $data00 $data01

View File

@ -101,6 +101,30 @@ if $data11 != 2 then
return -1
endi
## "tbname in" can accept upper-case table names
sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
if $rows != 3 then
return -1
endi
if $data00 != 10 then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data10 != 10 then
return -1
endi
if $data11 != 1 then
return -1
endi
if $data20 != 10 then
return -1
endi
if $data21 != 2 then
return -1
endi
# multiple tbname in is not allowed NOW
sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
#if $rows != 4 then

View File

@ -0,0 +1,128 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <pthread.h>
#define MAXLINE 1024  /* size of the HTTP response read buffer */
/* Per-thread work description handed to execute() via pthread_create(). */
typedef struct {
pthread_t pid;  /* thread handle, filled in by pthread_create() */
int threadId;   /* 1-based logical id; also used as the db name suffix */
int rows;       /* number of rows this thread inserts */
int tables;     /* number of tables this thread creates */
} ThreadObj;
/*
 * post - send one SQL statement to the TDengine REST endpoint as a raw
 * HTTP/1.1 POST and dump the server's response to stdout.
 *
 * ip/port: REST server address; page: URL path (e.g. "rest/sql/db1");
 * msg: the SQL text used verbatim as the request body.
 * Errors are reported on stdout; the function returns early on failure.
 */
void post(char *ip,int port,char *page,char *msg) {
  int sockfd,n;
  char recvline[MAXLINE];
  struct sockaddr_in servaddr;
  char content[4096];
  char content_page[50];
  sprintf(content_page,"POST /%s HTTP/1.1\r\n",page);
  char content_host[50];
  sprintf(content_host,"HOST: %s:%d\r\n",ip,port);
  char content_type[] = "Content-Type: text/plain\r\n";
  char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
  char content_len[50];
  /* %zu is the correct conversion for size_t (was %ld) */
  sprintf(content_len,"Content-Length: %zu\r\n\r\n",strlen(msg));
  /* snprintf bounds the request assembly; sprintf could overflow content[] */
  snprintf(content,sizeof(content),"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg);
  if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) {
    printf("socket error\n");
    return;  /* was: fell through and used fd -1 */
  }
  bzero(&servaddr,sizeof(servaddr));
  servaddr.sin_family = AF_INET;
  servaddr.sin_port = htons(port);
  if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) {
    printf("inet_pton error\n");
    close(sockfd);
    return;
  }
  if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) {
    printf("connect error\n");
    close(sockfd);
    return;
  }
  if(write(sockfd,content,strlen(content)) < 0) {
    printf("write error\n");
  }
  printf("%s\n", content);
  /* read at most MAXLINE-1 bytes so recvline[n] = 0 stays in bounds
   * (original read MAXLINE bytes and wrote the NUL one past the end) */
  while((n = read(sockfd,recvline,MAXLINE - 1)) > 0) {
    recvline[n] = 0;
    if(fputs(recvline,stdout) == EOF) {
      printf("fputs error\n");
    }
  }
  if(n < 0) {
    printf("read error\n");
  }
  close(sockfd);  /* original leaked the descriptor on every call */
}
/*
 * singleThread - single-connection smoke test of the REST interface:
 * creates db1/db2, one table and one row in each, then posts to a
 * non-existent database path to exercise the error response.
 */
void singleThread() {
  char host[] = "127.0.0.1";
  int restPort = 6041;
  char defaultEp[] = "rest/sql";
  char db1Ep[] = "rest/sql/db1";
  char db2Ep[] = "rest/sql/db2";
  char missingDbEp[] = "rest/sql/xxdb";

  /* Reset and recreate both databases through the default endpoint. */
  post(host, restPort, defaultEp, "drop database if exists db1");
  post(host, restPort, defaultEp, "create database if not exists db1");
  post(host, restPort, defaultEp, "drop database if exists db2");
  post(host, restPort, defaultEp, "create database if not exists db2");

  /* Per-database endpoints select the database from the URL. */
  post(host, restPort, db1Ep, "create table t11 (ts timestamp, c1 int)");
  post(host, restPort, db2Ep, "create table t21 (ts timestamp, c1 int)");
  post(host, restPort, db1Ep, "insert into t11 values (now, 1)");
  post(host, restPort, db2Ep, "insert into t21 values (now, 2)");

  /* Posting to an endpoint for a database that does not exist. */
  post(host, restPort, missingDbEp, "create database if not exists db3");
}
/*
 * execute - thread body: creates its own database (db<threadId>), then
 * pThread->tables tables and pThread->rows rows through the REST API.
 *
 * Returns NULL (now a proper pthread start routine: the original was
 * declared void and invoked through a void *(*)(void *) cast, which is
 * undefined behavior; returning void * also gives pthread_join a value).
 */
void *execute(void *params) {
  char ip[] = "127.0.0.1";
  int port = 6041;
  char page[] = "rest/sql";
  ThreadObj *pThread = (ThreadObj *)params;
  char *unique = calloc(1, 1024);
  char *sql = calloc(1, 1024);
  if (unique == NULL || sql == NULL) {  /* original never checked calloc */
    printf("Thread %d: out of memory\n", pThread->threadId);
    free(unique);  /* free(NULL) is a no-op */
    free(sql);
    return NULL;
  }
  printf("Thread %d started\n", pThread->threadId);
  sprintf(unique, "rest/sql/db%d", pThread->threadId);
  sprintf(sql, "drop database if exists db%d", pThread->threadId);
  post(ip, port, page, sql);
  sprintf(sql, "create database if not exists db%d", pThread->threadId);
  post(ip, port, page, sql);
  for (int i = 0; i < pThread->tables; i++) {
    sprintf(sql, "create table t%d (ts timestamp, c1 int)", i);
    post(ip, port, unique, sql);
  }
  /* NOTE(review): rows are inserted into t<threadId>, not t<i>; with
   * threadId up to numOfThreads this may address a table that was never
   * created (only t0..tables-1 exist) — preserved as-is, confirm intent. */
  for (int i = 0; i < pThread->rows; i++) {
    sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId);
    post(ip, port, unique, sql);
  }
  free(unique);
  free(sql);
  return NULL;
}
/*
 * multiThread - stress test: spawns 100 joinable threads, each running
 * execute() against its own database, then waits for all of them.
 */
void multiThread() {
  int numOfThreads = 100;
  int numOfTables = 100;
  int numOfRows = 1;
  ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj));
  if (threads == NULL) {  /* original dereferenced the result unchecked */
    printf("multiThread: out of memory\n");
    return;
  }
  for (int i = 0; i < numOfThreads; i++) {
    ThreadObj *pthread = threads + i;
    pthread_attr_t thattr;
    pthread->threadId = i + 1;
    pthread->rows = numOfRows;
    pthread->tables = numOfTables;
    pthread_attr_init(&thattr);
    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
    if (pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread) != 0) {
      /* joining an uninitialized pthread_t is undefined; bail out hard */
      printf("multiThread: failed to create thread %d\n", pthread->threadId);
      exit(1);
    }
    pthread_attr_destroy(&thattr);  /* original leaked the attr each loop */
  }
  for (int i = 0; i < numOfThreads; i++) {
    pthread_join(threads[i].pid, NULL);
  }
  free(threads);
}
/* Entry point: run the single-connection smoke test, then the
 * multi-threaded stress test, and report success. */
int main() {
  singleThread();
  multiThread();
  return 0;
}

View File

@ -0,0 +1,2 @@
all:
gcc -g httpTest.c -o httpTest -lpthread

View File

@ -90,6 +90,14 @@ cd ../../../debug; make
./test.sh -f general/parser/function.sim
./test.sh -f unique/cluster/vgroup100.sim
./test.sh -f unique/http/admin.sim
./test.sh -f unique/http/opentsdb.sim
./test.sh -f unique/import/replica2.sim
./test.sh -f unique/import/replica3.sim
./test.sh -f general/alter/cached_schema_after_alter.sim
#======================b1-end===============
#======================b2-start===============