[td-255] merge develop

This commit is contained in:
Haojun Liao 2021-08-25 13:56:13 +08:00
commit fa72b565a9
136 changed files with 8098 additions and 3799 deletions

View File

@ -23,6 +23,7 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0
--- ---
kind: pipeline kind: pipeline
name: test_arm64_bionic name: test_arm64_bionic
@ -150,6 +151,7 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0
--- ---
kind: pipeline kind: pipeline
name: build_trusty name: build_trusty
@ -176,6 +178,7 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0
--- ---
kind: pipeline kind: pipeline
name: build_xenial name: build_xenial
@ -201,7 +204,7 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0
--- ---
kind: pipeline kind: pipeline
name: build_bionic name: build_bionic
@ -226,6 +229,7 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0
--- ---
kind: pipeline kind: pipeline
name: build_centos7 name: build_centos7
@ -249,4 +253,4 @@ steps:
branch: branch:
- develop - develop
- master - master
- 2.0

1
.gitignore vendored
View File

@ -1,4 +1,5 @@
build/ build/
.ycm_extra_conf.py
.vscode/ .vscode/
.idea/ .idea/
cmake-build-debug/ cmake-build-debug/

View File

@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER) IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER}) SET(TD_VER_NUMBER ${VERNUMBER})
ELSE () ELSE ()
SET(TD_VER_NUMBER "2.1.6.0") SET(TD_VER_NUMBER "2.1.7.1")
ENDIF () ENDIF ()
IF (DEFINED VERCOMPATIBLE) IF (DEFINED VERCOMPATIBLE)

View File

@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) { char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
int nBytes; int nBytes;
char *pBuf; char *pBuf;
char *pBuf1;
nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */ nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
pBuf = (char *)malloc(nBytes); pBuf = (char *)malloc(nBytes);
if (!pBuf) { if (!pBuf) {
@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
free(pBuf); free(pBuf);
return NULL; return NULL;
} }
pBuf = realloc(pBuf, nBytes+1); pBuf1 = realloc(pBuf, nBytes+1);
return pBuf; if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
return pBuf1;
} }
int CountCharacters(const char *string, UINT cp) { int CountCharacters(const char *string, UINT cp) {

View File

@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */ int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
int nBackslash = 0; int nBackslash = 0;
char **ppszArg; char **ppszArg;
char **ppszArg1;
int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */ int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */
ppszArg = (char **)malloc((argc+1)*sizeof(char *)); ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */ if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
iArg = TRUE; iArg = TRUE;
ppszArg[argc++] = pszCopy+j; ppszArg[argc++] = pszCopy+j;
ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
if(ppszArg1 == NULL && ppszArg != NULL)
free(ppszArg);
ppszArg = ppszArg1;
if (!ppszArg) return -1; if (!ppszArg) return -1;
pszCopy[j] = c0 = '\0'; pszCopy[j] = c0 = '\0';
} }
@ -212,7 +216,7 @@ int _initU(void) {
fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n"); fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
_acmdln[0] = '\0'; _acmdln[0] = '\0';
} }
realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
/* Should not fail since we make it smaller */ /* Should not fail since we make it smaller */
/* Record the console code page, to allow converting the output accordingly */ /* Record the console code page, to allow converting the output accordingly */

View File

@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) { char *realpath(const char *path, char *outbuf) {
char *pOutbuf = outbuf; char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
int iErr; int iErr;
const char *pc; const char *pc;
@ -242,8 +243,11 @@ realpath_failed:
return NULL; return NULL;
} }
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); if (!outbuf) {
return pOutbuf; pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
return pOutbuf1;
} }
#endif #endif
@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) { char *realpathU(const char *path, char *outbuf) {
char *pOutbuf = outbuf; char *pOutbuf = outbuf;
char *pOutbuf1 = NULL;
char *pPath1 = NULL; char *pPath1 = NULL;
char *pPath2 = NULL; char *pPath2 = NULL;
int iErr; int iErr;
@ -590,10 +595,13 @@ realpathU_failed:
} }
DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf)); DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); if (!outbuf) {
pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
}
free(pPath1); free(pPath1);
free(pPath2); free(pPath2);
return pOutbuf; return pOutbuf1;
} }
#endif /* defined(_WIN32) */ #endif /* defined(_WIN32) */

View File

@ -2,18 +2,18 @@
## <a class="anchor" id="intro"></a>TDengine 简介 ## <a class="anchor" id="intro"></a>TDengine 简介
TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品它不依赖任何第三方软件也不是优化或包装了一个开源的数据库或流式计算产品而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品在时序空间大数据处理上有着自己独到的优势。 TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品它不依赖任何第三方软件也不是优化或包装了一个开源的数据库或流式计算产品而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
TDengine的模块之一是时序数据库。但除此之外为减少研发的复杂度、系统维护的难度TDengine还提供缓存、消息队列、订阅、流式计算等功能为物联网、工业互联网大数据的处理提供全栈的技术方案是一个高效易用的物联网大数据平台。与Hadoop等典型的大数据平台相比它具有如下鲜明的特点 TDengine 的模块之一是时序数据库。但除此之外为减少研发的复杂度、系统维护的难度TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点:
* __10倍以上的性能提升__定义了创新的数据存储结构单核每秒能处理至少2万次请求插入数百万个数据点读出一千万以上数据点比现有通用数据库快十倍以上。 * __10 倍以上的性能提升__定义了创新的数据存储结构单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
* __硬件或云服务成本降至1/5__由于超强性能计算资源不到通用大数据方案的1/5通过列式存储和先进的压缩算法存储空间不到通用数据库的1/10。 * __硬件或云服务成本降至 1/5__由于超强性能计算资源不到通用大数据方案的 1/5通过列式存储和先进的压缩算法存储空间不到通用数据库的 1/10。
* __全栈时序数据处理引擎__将数据库、消息队列、缓存、流式计算等功能融为一体应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件大幅降低应用开发和维护的复杂度成本。 * __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。
* __强大的分析功能__无论是十年前还是一秒钟前的数据指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, MATLAB随时进行。 * __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。
* __与第三方工具无缝连接__不用一行代码即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R等集成。后续将支持OPC, Hadoop, Spark等BI工具也将无缝连接。 * __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark BI 工具也将无缝连接。
* __零运维成本、零学习成本__安装集群简单快捷无需分库分表实时备份。类标准SQL支持RESTful支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似零学习成本。 * __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准 SQL支持 RESTful支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。
采用TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是因充分利用了物联网时序数据的特点它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 采用 TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是因充分利用了物联网时序数据的特点它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。
![TDengine技术生态图](page://images/eco_system.png) ![TDengine技术生态图](page://images/eco_system.png)
<center>图 1. TDengine技术生态图</center> <center>图 1. TDengine技术生态图</center>
@ -21,42 +21,47 @@ TDengine的模块之一是时序数据库。但除此之外为减少研发的
## <a class="anchor" id="scenes"></a>TDengine 总体适用场景 ## <a class="anchor" id="scenes"></a>TDengine 总体适用场景
作为一个IOT大数据平台TDengine的典型适用场景是在IOT范畴而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统比如CRMERP等不在本文讨论范围内。 作为一个 IoT 大数据平台TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRMERP 等,不在本文讨论范围内。
### 数据源特点和需求 ### 数据源特点和需求
从数据源角度设计人员可以从下面几个角度分析TDengine在目标应用系统里面的适用性。
从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
|数据源特点和需求|不适用|可能适用|非常适用|简单说明| |数据源特点和需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---| |---|---|---|---|---|
|总体数据量巨大| | | √ |TDengine在容量方面提供出色的水平扩展功能并且具备匹配高压缩的存储结构达到业界最优的存储效率。| |总体数据量巨大| | | √ |TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
|数据输入速度偶尔或者持续巨大| | | √ | TDengine的性能大大超过同类产品可以在同样的硬件环境下持续处理大量的输入数据并且提供很容易在用户环境里面运行的性能评估工具。| |数据输入速度偶尔或者持续巨大| | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
|数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化包括数据的写入和查询尤其适合高效处理海量千万或者更多量级的数据源。| |数据源数目巨大| | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
### 系统架构要求 ### 系统架构要求
|系统架构要求|不适用|可能适用|非常适用|简单说明| |系统架构要求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---| |---|---|---|---|---|
|要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠自带消息队列缓存流式计算监控等功能无需集成额外的第三方产品。| |要求简单可靠的系统架构| | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
|要求容错和高可靠| | | √ |TDengine的集群功能自动提供容错灾备等高可靠功能。| |要求容错和高可靠| | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。|
|标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能遵守标准化规范。| |标准化规范| | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。|
### 系统功能需求 ### 系统功能需求
|系统功能需求|不适用|可能适用|非常适用|简单说明| |系统功能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---| |---|---|---|---|---|
|要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法但是还没有做到妥善处理各行各业的所有要求因此特殊类型的处理还需要应用层面处理。| |要求完整的内置数据处理算法| | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理或者应该考虑TDengine和关系型数据系统配合实现系统功能。| |需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。|
### 系统性能需求 ### 系统性能需求
|系统性能需求|不适用|可能适用|非常适用|简单说明| |系统性能需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---| |---|---|---|---|---|
|要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。| |要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
|要求高速处理数据 | | | √ |TDengine的专门为IOT优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| |要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。| |要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
### 系统维护需求 ### 系统维护需求
|系统维护需求|不适用|可能适用|非常适用|简单说明| |系统维护需求|不适用|可能适用|非常适用|简单说明|
|---|---|---|---|---| |---|---|---|---|---|
|要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠日常维护也简单便捷对维护人员的要求简洁明了最大程度上杜绝人为错误和事故。| |要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
|要求运维学习成本可控| | | √ |同上。| |要求运维学习成本可控| | | √ |同上。|
|要求市场有大量人才储备| √ | | |TDengine作为新一代产品目前人才市场里面有经验的人员还有限。但是学习成本低我们作为厂家也提供运维的培训和辅助服务。| |要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|

View File

@ -1,6 +1,6 @@
# 通过 Docker 快速体验 TDengine # 通过 Docker 快速体验 TDengine
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine而无需安装虚拟机或额外租用 Linux 服务器。 虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine而无需安装虚拟机或额外租用 Linux 服务器。另外从2.0.14.0版本开始TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c
```bash ```bash
$ docker -v $ docker -v
Docker version 20.10.5, build 55c4c88 Docker version 20.10.3, build 48d30b5
``` ```
## 在 Docker 容器中运行 TDengine ## 在 Docker 容器中运行 TDengine
@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88
1使用命令拉取 TDengine 镜像,并使它在后台运行。 1使用命令拉取 TDengine 镜像,并使它在后台运行。
```bash ```bash
$ docker run -d tdengine/tdengine $ docker run -d --name tdengine tdengine/tdengine
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316 7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
``` ```
- **docker run**:通过 Docker 运行一个容器。 - **docker run**:通过 Docker 运行一个容器
- **-d**:让容器在后台运行。 - **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。 - **-d**:让容器在后台运行
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID我们可以通过容器 ID 来查看对应的容器。 - **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID我们也可以通过容器 ID 来查看对应的容器
2确认容器是否已经正确运行。 2确认容器是否已经正确运行。
```bash ```bash
$ docker ps $ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS ··· CONTAINER ID IMAGE COMMAND CREATED STATUS ···
cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
``` ```
- **docker ps**:列出所有正在运行状态的容器信息。 - **docker ps**:列出所有正在运行状态的容器信息。
@ -47,22 +48,22 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
3进入 Docker 容器内,使用 TDengine。 3进入 Docker 容器内,使用 TDengine。
```bash ```bash
$ docker exec -it cdf548465318 /bin/bash $ docker exec -it tdengine /bin/bash
root@cdf548465318:~/TDengine-server-2.0.13.0# root@c452519b0f9b:~/TDengine-server-2.0.20.13#
``` ```
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 - **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
- **-i**:进入交互模式。 - **-i**:进入交互模式。
- **-t**:指定一个终端。 - **-t**:指定一个终端。
- **cdf548465318**:容器 ID需要根据 docker ps 指令返回的值进行修改。 - **c452519b0f9b**:容器 ID需要根据 docker ps 指令返回的值进行修改。
- **/bin/bash**:载入容器后运行 bash 来进行交互。 - **/bin/bash**:载入容器后运行 bash 来进行交互。
4进入容器后执行 taos shell 客户端程序。 4进入容器后执行 taos shell 客户端程序。
```bash ```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos $ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> taos>
@ -78,42 +79,71 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
```bash ```bash
$ taos> q $ taos> q
root@cdf548465318:~/TDengine-server-2.0.13.0# root@c452519b0f9b:~/TDengine-server-2.0.20.13#
``` ```
2在命令行界面执行 taosdemo。 2在命令行界面执行 taosdemo。
```bash ```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
###################################################################
# Server IP: localhost:0 taosdemo is simulating data generated by power equipments monitoring...
# User: root
# Password: taosdata host: 127.0.0.1:6030
# Use metric: true user: root
# Datatype of Columns: int int int int int int int float password: taosdata
# Binary Length(If applicable): -1 configDir:
# Number of Columns per record: 3 resultFile: ./output.txt
# Number of Threads: 10 thread num of insert data: 10
# Number of Tables: 10000 thread num of create table: 10
# Number of Data per Table: 100000 top insert interval: 0
# Records/Request: 1000 number of records per req: 30000
# Database name: test max sql length: 1048576
# Table prefix: t database count: 1
# Delete method: 0 database[0]:
# Test time: 2021-04-13 02:05:20 database[0] name: test
################################################################### drop: yes
replica: 1
precision: ms
super table count: 1
super table[0]:
stbName: meters
autoCreateTable: no
childTblExists: no
childTblCount: 10000
childTblPrefix: d
dataSource: rand
iface: taosc
insertRows: 10000
interlaceRows: 0
disorderRange: 1000
disorderRatio: 0
maxSqlLen: 1048576
timeStampStep: 1
startTimestamp: 2017-07-14 10:40:00.000
sampleFormat:
sampleFile:
tagsFile:
columnCount: 3
column[0]:FLOAT column[1]:INT column[2]:FLOAT
tagCount: 2
tag[0]:INT tag[1]:BINARY(16)
Press enter key to continue or Ctrl-C to stop
``` ```
回车后,该命令将新建一个数据库 test并且自动创建一张超级表 meters并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1f2f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAGareaid 被设置为 1 到 10loc 被设置为 "beijing" 或 "shanghai"。 回车后,该命令将在数据库 test 下面自动创建一张超级表 meters该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupIdgroupId 被设置为 1 到 10 location 被设置为 "beijing" 或者 "shanghai"。
执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
3进入 TDengine 终端,查看 taosdemo 生成的数据。 3进入 TDengine 终端,查看 taosdemo 生成的数据。
- **进入命令行。** - **进入命令行。**
```bash ```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos $ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
taos> taos>
@ -124,8 +154,8 @@ taos>
```bash ```bash
$ taos> show databases; $ taos> show databases;
name | created_time | ntables | vgroups | ··· name | created_time | ntables | vgroups | ···
test | 2021-04-13 02:14:15.950 | 10000 | 6 | ··· test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
log | 2021-04-12 09:36:37.549 | 4 | 1 | ··· log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
``` ```
@ -137,9 +167,9 @@ Database changed.
$ taos> show stables; $ taos> show stables;
name | created_time | columns | tags | tables | name | created_time | columns | tags | tables |
===================================================================================== ============================================================================================
meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 | meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
Query OK, 1 row(s) in set (0.001737s) Query OK, 1 row(s) in set (0.003259s)
``` ```
@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s)
```bash ```bash
$ taos> select * from test.t0 limit 10; $ taos> select * from test.t0 limit 10;
ts | f1 | f2 | f3 |
==================================================================== DB error: Table does not exist (0.002857s)
2017-07-14 02:40:01.000 | 3 | 9 | 0 | taos> select * from test.d0 limit 10;
2017-07-14 02:40:02.000 | 0 | 1 | 2 | ts | current | voltage | phase |
2017-07-14 02:40:03.000 | 7 | 2 | 3 | ======================================================================================
2017-07-14 02:40:04.000 | 9 | 4 | 5 | 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
2017-07-14 02:40:05.000 | 1 | 2 | 5 | 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
2017-07-14 02:40:06.000 | 6 | 3 | 2 | 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
2017-07-14 02:40:07.000 | 4 | 7 | 8 | 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
2017-07-14 02:40:08.000 | 4 | 6 | 6 | 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
2017-07-14 02:40:09.000 | 5 | 7 | 7 | 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
2017-07-14 02:40:10.000 | 1 | 5 | 0 | 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
Query OK, 10 row(s) in set (0.003638s) 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
Query OK, 10 row(s) in set (0.016791s)
``` ```
- **查看 t0 表的标签值。** - **查看 d0 表的标签值。**
```bash ```bash
$ taos> select areaid, loc from test.t0; $ taos> select groupid, location from test.d0;
areaid | loc | groupid | location |
=========================== =================================
10 | shanghai | 0 | shanghai |
Query OK, 1 row(s) in set (0.002904s) Query OK, 1 row(s) in set (0.003490s)
``` ```
## 停止正在 Docker 中运行的 TDengine 服务 ## 停止正在 Docker 中运行的 TDengine 服务
```bash ```bash
$ docker stop cdf548465318 $ docker stop tdengine
cdf548465318 tdengine
``` ```
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 - **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
- **cdf548465318**:容器 ID根据 docker ps 指令返回的结果进行修改 - **tdengine**:容器名称
## 编程开发时连接在 Docker 中的 TDengine ## 编程开发时连接在 Docker 中的 TDengine
@ -191,11 +224,11 @@ cdf548465318
1通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 1通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。
```bash ```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine $ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0} {"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
``` ```
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。 - 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
2直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。 2直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
```bash ```bash
$ docker exec -it 526aa188da /bin/bash $ docker exec -it tdengine /bin/bash
``` ```

View File

@ -105,7 +105,7 @@ $ taos -h h1.taos.com -s "use db; show tables;"
**运行 SQL 命令脚本** **运行 SQL 命令脚本**
TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本. TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本
```mysql ```mysql
taos> source <filename>; taos> source <filename>;
@ -166,14 +166,12 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 **Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。
## 客户端和报警模块 ## 客户端和报警模块
如果客户端和服务端运行在不同的电脑上可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。 如果客户端和服务端运行在不同的电脑上可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。
报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。 报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。
## <a class="anchor" id="platforms"></a>支持平台列表 ## <a class="anchor" id="platforms"></a>支持平台列表
### TDengine 服务器支持的平台列表 ### TDengine 服务器支持的平台列表
@ -193,8 +191,6 @@ taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
### TDengine 客户端和连接器支持的平台列表 ### TDengine 客户端和连接器支持的平台列表
目前 TDengine 的连接器可支持的平台广泛目前包括X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。 目前 TDengine 的连接器可支持的平台广泛目前包括X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。

View File

@ -161,17 +161,17 @@ TDengine 分布式架构的逻辑结构图如下:
一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。 一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine应用驱动(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯如果不了解FQDN请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。 **物理节点(pnode)** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机可以是安装有OS的物理机、虚拟机或Docker容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯如果不了解FQDN请看博文[《一篇文章说清楚TDengine的FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP)决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。 **数据节点(dnode)** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP)决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
**虚拟节点(vnode)**: 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP以及所属的VGroup ID在系统内唯一标识由管理节点创建并管理。 **虚拟节点(vnode)** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode图中V2, V3, V4等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个DB但一个DB可以有多个 vnode。一个 vnode 除存储的时序数据外也保存有所包含的表的schema、标签值等。一个虚拟节点由所属的数据节点的EP以及所属的VGroup ID在系统内唯一标识由管理节点创建并管理。
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成无需人工干预。每个dnode上至多有一个mnode由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。 **管理节点(mnode)** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(开源版最多不超过3个) mnode它们自动构建成为一个虚拟管理节点组(图中M0, M1, M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步, 任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成无需人工干预。每个dnode上至多有一个mnode由所属的数据节点的EP来唯一标识。每个dnode通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的EP。
**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定缺省为1。使用 TDengine 的多副本特性可以不再需要昂贵的磁盘阵列等存储设备就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理并且由管理节点分配一个系统唯一的IDVGroup ID。如果两个虚拟节点的vnode group ID相同说明他们属于同一个组数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的容许只有一个也就是没有数据复制。VGroup ID是永远不变的即使一个虚拟节点组被删除它的ID也不会被收回重复利用。 **虚拟节点组(VGroup)** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vnode group)来保证系统的高可靠。虚拟节点组内采取master/slave的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个DB的副本数为N系统必须有至少N个数据节点。副本数在创建DB时通过参数 replica 可以指定缺省为1。使用 TDengine 的多副本特性可以不再需要昂贵的磁盘阵列等存储设备就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理并且由管理节点分配一个系统唯一的IDVGroup ID。如果两个虚拟节点的vnode group ID相同说明他们属于同一个组数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的容许只有一个也就是没有数据复制。VGroup ID是永远不变的即使一个虚拟节点组被删除它的ID也不会被收回重复利用。
**TAOSC:** taosc是TDengine给应用提供的驱动程序(driver)负责处理应用与集群的接口交互提供C/C++语言原生接口内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据将插入、查询等请求转发到正确的数据节点在把结果返回给应用时还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC, C/C++/C#/Python/Go/Node.js接口而言这个模块是在应用所处的物理节点上运行。同时为支持全分布式的RESTful接口taosc在TDengine集群的每个dnode上都有一运行实例。 **TAOSC** taosc是TDengine给应用提供的驱动程序(driver)负责处理应用与集群的接口交互提供C/C++语言原生接口内嵌于JDBC、C#、Python、Go、Node.js语言连接库里。应用都是通过taosc而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据将插入、查询等请求转发到正确的数据节点在把结果返回给应用时还需要负责最后一级的聚合、排序、过滤等操作。对于JDBC、C/C++、C#、Python、Go、Node.js接口而言这个模块是在应用所处的物理节点上运行。同时为支持全分布式的RESTful接口taosc在TDengine集群的每个dnode上都有一运行实例。
### 节点之间的通讯 ### 节点之间的通讯
@ -181,11 +181,9 @@ TDengine 分布式架构的逻辑结构图如下:
**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定对集群内部通讯的端口是serverPort+5。为支持多线程高效的处理UDP数据每个对内和对外的UDP连接都需要占用5个连续的端口。 **端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定对集群内部通讯的端口是serverPort+5。为支持多线程高效的处理UDP数据每个对内和对外的UDP连接都需要占用5个连续的端口。
集群内数据节点之间的数据复制操作占用一个TCP端口是serverPort+10。 - 集群内数据节点之间的数据复制操作占用一个TCP端口是serverPort+10。
- 集群数据节点对外提供RESTful服务占用一个TCP端口是serverPort+11。
集群数据节点对外提供RESTful服务占用一个TCP端口是serverPort+11。 - 集群内数据节点与Arbitrator节点之间通讯占用一个TCP端口是serverPort+12。
集群内数据节点与Arbitrator节点之间通讯占用一个TCP端口是serverPort+12。
因此一个数据节点总的端口范围为serverPort到serverPort+12总共13个TCP/UDP端口。使用时需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port) 因此一个数据节点总的端口范围为serverPort到serverPort+12总共13个TCP/UDP端口。使用时需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)
@ -193,11 +191,9 @@ TDengine 分布式架构的逻辑结构图如下:
**集群内部通讯:**各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时将获取mnode所在的dnode的EP信息然后与系统中的mnode建立起连接交换信息。获取mnode的EP信息有三步 **集群内部通讯:**各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时将获取mnode所在的dnode的EP信息然后与系统中的mnode建立起连接交换信息。获取mnode的EP信息有三步
1检查mnodeEpSet.json文件是否存在如果不存在或不能正常打开获得mnode EP信息进入第二步 1. 检查mnodeEpSet.json文件是否存在如果不存在或不能正常打开获得mnode EP信息进入第二步
2. 检查系统配置文件taos.cfg获取节点配置参数firstEp、secondEp这两个参数指定的节点可以是不带mnode的普通节点这样的话节点被连接时会尝试重定向到mnode节点如果不存在或者taos.cfg里没有这两个配置参数或无效进入第三步
2检查系统配置文件taos.cfg获取节点配置参数firstEp、secondEp这两个参数指定的节点可以是不带mnode的普通节点这样的话节点被连接时会尝试重定向到mnode节点如果不存在或者taos.cfg里没有这两个配置参数或无效进入第三步 3. 将自己的EP设为mnode EP并独立运行起来。
3将自己的EP设为mnode EP并独立运行起来。
获取mnode EP列表后数据节点发起连接如果连接成功则成功加入进工作的集群如果不成功则尝试mnode EP列表中的下一个。如果都尝试了但连接都仍然失败则休眠几秒后再进行尝试。 获取mnode EP列表后数据节点发起连接如果连接成功则成功加入进工作的集群如果不成功则尝试mnode EP列表中的下一个。如果都尝试了但连接都仍然失败则休眠几秒后再进行尝试。
@ -271,6 +267,7 @@ TDengine除vnode分片之外还对时序数据按照时间段进行分区。
当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。 当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。
负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。 负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。
**提示负载均衡由参数balance控制决定开启/关闭自动负载均衡。** **提示负载均衡由参数balance控制决定开启/关闭自动负载均衡。**
## <a class="anchor" id="replication"></a>数据写入与复制流程 ## <a class="anchor" id="replication"></a>数据写入与复制流程
@ -293,13 +290,13 @@ Master Vnode遵循下面的写入流程
### Slave Vnode写入流程 ### Slave Vnode写入流程
对于slave vnode, 写入流程是: 对于slave vnode写入流程是:
![TDengine Slave写入流程](page://images/architecture/write_slave.png) ![TDengine Slave写入流程](page://images/architecture/write_slave.png)
<center> 图 4 TDengine Slave写入流程 </center> <center> 图 4 TDengine Slave写入流程 </center>
1. slave vnode收到Master vnode转发了的数据插入请求。检查last version是否与master一致如果一致进入下一步。如果不一致需要进入同步状态。 1. slave vnode收到Master vnode转发了的数据插入请求。检查last version是否与master一致如果一致进入下一步。如果不一致需要进入同步状态。
2. 如果系统配置参数walLevel大于0vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2而且fsync设置为0TDengine还将WAL数据立即落盘以保证即使宕机也能从数据库日志文件中恢复数据避免数据的丢失 2. 如果系统配置参数walLevel大于0vnode将把该请求的原始数据包写入数据库日志文件WAL。如果walLevel设置为2而且fsync设置为0TDengine还将WAL数据立即落盘以保证即使宕机也能从数据库日志文件中恢复数据避免数据的丢失
3. 写入内存更新内存中的skip list。 3. 写入内存更新内存中的skip list。
与master vnode相比slave vnode不存在转发环节也不存在回复确认环节少了两步。但写内存与WAL是完全一样的。 与master vnode相比slave vnode不存在转发环节也不存在回复确认环节少了两步。但写内存与WAL是完全一样的。

View File

@ -2,7 +2,7 @@
# TDengine数据建模 # TDengine数据建模
TDengine采用关系型数据模型需要建库、建表。因此对于一个具体的应用场景需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 TDengine采用关系型数据模型需要建库、建表。因此对于一个具体的应用场景需要考虑库超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。 关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。
@ -11,9 +11,9 @@ TDengine采用关系型数据模型需要建库、建表。因此对于一个
不同类型的数据采集点往往具有不同的数据特征包括数据采集频率的高低数据保留时间的长短副本的数目数据块的大小是否允许更新数据等等。为了在各种场景下TDengine都能最大效率的工作TDengine建议将不同数据特征的表创建在不同的库里因为每个库可以配置不同的存储策略。创建一个库时除SQL标准的选项外应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如 不同类型的数据采集点往往具有不同的数据特征包括数据采集频率的高低数据保留时间的长短副本的数目数据块的大小是否允许更新数据等等。为了在各种场景下TDengine都能最大效率的工作TDengine建议将不同数据特征的表创建在不同的库里因为每个库可以配置不同的存储策略。创建一个库时除SQL标准的选项外应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如
```mysql ```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
``` ```
上述语句将创建一个名为power的库这个库的数据将保留365天超过365天将被自动删除每10天一个数据文件内存块数为4,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。 上述语句将创建一个名为power的库这个库的数据将保留365天超过365天将被自动删除每10天一个数据文件内存块数为6,允许更新数据。详细的语法及参数请见 [TAOS SQL 的数据管理](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节。
创建库之后需要使用SQL命令USE将当前库切换过来例如 创建库之后需要使用SQL命令USE将当前库切换过来例如
@ -65,7 +65,7 @@ TDengine建议将数据采集点的全局唯一ID作为表名(比如设备序列
INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32); INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
``` ```
上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建则使用超级表meters做模板自动创建同时打上标签值“Beijing.Chaoyang", 2。 上述SQL语句将记录 (now, 10.2, 219, 0.32) 插入表d1001。如果表d1001还未创建则使用超级表meters做模板自动创建同时打上标签值 `“Beijing.Chaoyang", 2`
关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。 关于自动建表的详细语法请参见 [插入记录时自动建表](https://www.taosdata.com/cn/documentation/taos-sql#auto_create_table) 章节。

View File

@ -35,7 +35,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码使用Golang语言编译器编译生成可执行文件。在开始编译前需要准备好以下条件 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码使用Golang语言编译器编译生成可执行文件。在开始编译前需要准备好以下条件
- Linux操作系统的服务器 - Linux操作系统的服务器
- 安装好Golang, 1.10版本以上 - 安装好Golang1.10版本以上
- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库因此需要安装好和服务端相同版本的TDengine程序比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器可以与TDengine在同一台服务器或者不同服务器 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库因此需要安装好和服务端相同版本的TDengine程序比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器可以与TDengine在同一台服务器或者不同服务器
Bailongma项目中有一个文件夹blm_prometheus存放了prometheus的写入API程序。编译过程如下 Bailongma项目中有一个文件夹blm_prometheus存放了prometheus的写入API程序。编译过程如下
@ -48,13 +48,15 @@ go build
### 安装Prometheus ### 安装Prometheus
通过Prometheus的官网下载安装。[下载地址](https://prometheus.io/download/) 通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)
### 配置Prometheus ### 配置Prometheus
参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/)在Prometheus的配置文件中的<remote_write>部分,增加以下配置 参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/)在Prometheus的配置文件中的<remote_write>部分,增加以下配置
- url: bailongma API服务提供的URL参考下面的blm_prometheus启动示例章节 ```
- url: "bailongma API服务提供的URL"参考下面的blm_prometheus启动示例章节
```
启动Prometheus后可以通过taos客户端查询确认数据是否成功写入。 启动Prometheus后可以通过taos客户端查询确认数据是否成功写入。
@ -62,7 +64,7 @@ go build
blm_prometheus程序有以下选项在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。 blm_prometheus程序有以下选项在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。
```bash ```bash
--tdengine-name --tdengine-name
如果TDengine安装在一台具备域名的服务器上也可以通过配置TDengine的域名来访问TDengine。在K8S环境下可以配置成TDengine所运行的service name 如果TDengine安装在一台具备域名的服务器上也可以通过配置TDengine的域名来访问TDengine。在K8S环境下可以配置成TDengine所运行的service name
--batch-size --batch-size
blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求这个参数控制一次发给TDengine的写入请求中携带的数据条数。 blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求这个参数控制一次发给TDengine的写入请求中携带的数据条数。
@ -71,10 +73,10 @@ blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求
设置在TDengine中创建的数据库名称blm_prometheus会自动在TDengine中创建一个以dbname为名称的数据库缺省值是prometheus。 设置在TDengine中创建的数据库名称blm_prometheus会自动在TDengine中创建一个以dbname为名称的数据库缺省值是prometheus。
--dbuser --dbuser
设置访问TDengine的用户名缺省值是'root' 设置访问TDengine的用户名缺省值是'root'
--dbpassword --dbpassword
设置访问TDengine的密码缺省值是'taosdata' 设置访问TDengine的密码缺省值是'taosdata'
--port --port
blm_prometheus对prometheus提供服务的端口号。 blm_prometheus对prometheus提供服务的端口号。
@ -125,7 +127,7 @@ select * from apiserver_request_latencies_bucket;
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码使用Golang语言编译器编译生成可执行文件。在开始编译前需要准备好以下条件 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码使用Golang语言编译器编译生成可执行文件。在开始编译前需要准备好以下条件
- Linux操作系统的服务器 - Linux操作系统的服务器
- 安装好Golang, 1.10版本以上 - 安装好Golang1.10版本以上
- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库因此需要安装好和服务端相同版本的TDengine程序比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器可以与TDengine在同一台服务器或者不同服务器 - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库因此需要安装好和服务端相同版本的TDengine程序比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器可以与TDengine在同一台服务器或者不同服务器
Bailongma项目中有一个文件夹blm_telegraf存放了Telegraf的写入API程序。编译过程如下 Bailongma项目中有一个文件夹blm_telegraf存放了Telegraf的写入API程序。编译过程如下
@ -139,7 +141,7 @@ go build
### 安装Telegraf ### 安装Telegraf
目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统到Telegraf官网下载安装包并执行安装。下载地址如下https://portal.influxdata.com/downloads 目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统到Telegraf官网下载安装包并执行安装。下载地址如下https://portal.influxdata.com/downloads
### 配置Telegraf ### 配置Telegraf
@ -153,7 +155,7 @@ go build
在agent部分 在agent部分
- hostname: 区分不同采集设备的机器名称,需确保其唯一性 - hostname: 区分不同采集设备的机器名称,需确保其唯一性
- metric_batch_size: 100允许Telegraf每批次写入记录最大数量增大其数量可以降低Telegraf的请求发送频率。 - metric_batch_size: 100允许Telegraf每批次写入记录最大数量增大其数量可以降低Telegraf的请求发送频率。
关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。
@ -163,7 +165,7 @@ blm_telegraf程序有以下选项在启动blm_telegraf程序时可以通过
```bash ```bash
--host --host
TDengine服务端的IP地址缺省值为空 TDengine服务端的IP地址缺省值为空
--batch-size --batch-size
blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求这个参数控制一次发给TDengine的写入请求中携带的数据条数。 blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求这个参数控制一次发给TDengine的写入请求中携带的数据条数。
@ -172,10 +174,10 @@ blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求
设置在TDengine中创建的数据库名称blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库缺省值是prometheus。 设置在TDengine中创建的数据库名称blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库缺省值是prometheus。
--dbuser --dbuser
设置访问TDengine的用户名缺省值是'root' 设置访问TDengine的用户名缺省值是'root'
--dbpassword --dbpassword
设置访问TDengine的密码缺省值是'taosdata' 设置访问TDengine的密码缺省值是'taosdata'
--port --port
blm_telegraf对telegraf提供服务的端口号。 blm_telegraf对telegraf提供服务的端口号。
@ -183,7 +185,7 @@ blm_telegraf对telegraf提供服务的端口号。
### 启动示例 ### 启动示例
通过以下命令启动一个blm_telegraf的API服务 通过以下命令启动一个blm_telegraf的API服务
```bash ```bash
./blm_telegraf -host 127.0.0.1 -port 8089 ./blm_telegraf -host 127.0.0.1 -port 8089
``` ```

View File

@ -35,13 +35,13 @@ select avg(voltage) from meters interval(1m) sliding(30s);
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
``` ```
这样做没有问题但TDengine提供了更简单的方法只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如: 这样做没有问题但TDengine提供了更简单的方法只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了例如:
```sql ```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
``` ```
会自动创建一个名为 `avg_vol` 的新表然后每隔30秒TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如: 会自动创建一个名为 `avg_vol` 的新表然后每隔30秒TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。例如:
```mysql ```mysql
taos> select * from avg_vol; taos> select * from avg_vol;

View File

@ -1,72 +1,6 @@
# Java Connector # Java Connector
## 安装 ## 总体介绍
Java连接器支持的系统有 Linux 64/Windows x64/Windows x86。
**安装前准备:**
- 已安装TDengine服务器端
- 已安装好TDengine应用驱动具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。
由于 TDengine 的应用驱动是使用C语言开发的使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
### 如何获取 TAOS-JDBCDriver
**maven仓库**
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
maven 项目中使用如下 pom.xml 配置即可:
```xml-dtd
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
</dependency>
```
**源码编译打包**
下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
### 示例程序
示例程序源码位于install_directory/examples/JDBC有如下目录
JDBCDemo JDBC示例源程序
JDBCConnectorChecker JDBC安装校验源程序及jar包
Springbootdemo springboot示例源程序
SpringJdbcTemplate SpringJDBC模板
### 安装验证
运行如下指令:
```Bash
cd {install_directory}/examples/JDBC/JDBCConnectorChecker
java -jar JDBCConnectorChecker.jar -host <fqdn>
```
验证通过将打印出成功信息。
## Java连接器的使用
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
@ -78,13 +12,10 @@ java -jar JDBCConnectorChecker.jar -host <fqdn>
* RESTful应用将 SQL 发送给位于物理节点2pnode2上的 RESTful 连接器,再调用客户端 APIlibtaos.so * RESTful应用将 SQL 发送给位于物理节点2pnode2上的 RESTful 连接器,再调用客户端 APIlibtaos.so
* JDBC-RESTfulJava 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求发送给物理节点2的 RESTful 连接器。 * JDBC-RESTfulJava 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求发送给物理节点2的 RESTful 连接器。
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
* TDengine 目前不支持针对单条数据记录的删除操作。 * TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。 * 目前不支持事务操作。
* 目前不支持嵌套查询nested query
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询taos-jdbcdriver 会自动关闭上一个 ResultSet。
### JDBC-JNI和JDBC-RESTful的对比 ### JDBC-JNI和JDBC-RESTful的对比
@ -115,13 +46,17 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
</tr> </tr>
</table> </table>
注意:与 JNI 方式不同RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 注意:与 JNI 方式不同RESTful 接口是无状态的。在使用JDBC-RESTful时需要在sql中指定表、超级表的数据库名称。例如
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
```
### <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 ## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- | | -------------------- | ----------------- | -------- |
| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | | 2.0.33 - 2.0.34 | 2.0.3.0 及以上 | 1.8.x |
| 2.0.31 - 2.0.32 | 2.1.3.0 及以上 | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | | 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | | 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
@ -129,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
### TDengine DataType 和 Java DataType ## TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
@ -146,10 +81,50 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| BINARY | byte array | | BINARY | byte array |
| NCHAR | java.lang.String | | NCHAR | java.lang.String |
## 安装Java Connector
### 安装前准备
使用Java Connector连接数据库前需要具备以下条件
1. Linux或Windows操作系统
2. Java 1.8以上运行时环境
3. TDengine-client使用JDBC-JNI时必须使用JDBC-RESTful时非必须
**注意**:由于 TDengine 的应用驱动是使用C语言开发的使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
### 通过maven获取JDBC driver
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
maven 项目中在pom.xml 中添加以下依赖:
```xml-dtd
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
</dependency>
```
### 通过源码编译获取JDBC driver
可以通过下载TDengine的源码自己编译最新版本的java connector
```shell
git clone https://github.com/taosdata/TDengine.git
cd TDengine/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true
```
编译后在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。
## Java连接器的使用
### 获取连接 ### 获取连接
#### 指定URL获取连接 #### 指定URL获取连接
通过指定URL获取连接如下所示 通过指定URL获取连接如下所示
```java ```java
@ -157,34 +132,24 @@ Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl); Connection conn = DriverManager.getConnection(jdbcUrl);
``` ```
以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。 以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要: 使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver” 1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头; 2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。 3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示 如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示
```java ```java
Class.forName("com.taosdata.jdbc.TSDBDriver"); Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl); Connection conn = DriverManager.getConnection(jdbcUrl);
``` ```
以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。 以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 **注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库Linux 下是 libtaos.soWindows 下是 taos.dll
* libtaos.so > 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client)Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF) 连接远程 TDengine Server。
在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* taos.dll
在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
@ -192,14 +157,15 @@ TDengine 的 JDBC URL 规范格式为:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` `jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
url中的配置参数如下 url中的配置参数如下
* user登录 TDengine 用户名,默认值 root。 * user登录 TDengine 用户名,默认值 'root'
* password用户登录密码默认值 taosdata。 * password用户登录密码默认值 'taosdata'
* cfgdir客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg * cfgdir客户端配置文件目录路径Linux OS 上默认值 `/etc/taos`Windows OS 上默认值 `C:/TDengine/cfg`
* charset客户端使用的字符集默认值为系统字符集。 * charset客户端使用的字符集默认值为系统字符集。
* locale客户端语言环境默认值系统当前 locale。 * locale客户端语言环境默认值系统当前 locale。
* timezone客户端使用的时区默认值为系统当前时区。 * timezone客户端使用的时区默认值为系统当前时区。
* batchfetch: 仅在使用JDBC-JNI时生效。true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。
* timestampFormat: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP'结果集中timestamp类型的字段为一个long值; 'UTC'结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING'结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
* batchErrorIgnoretrue在执行Statement的executeBatch时如果中间有一条sql执行失败继续执行下面的sq了。false不再执行失败sql后的任何语句。默认值为false。
#### 指定URL和Properties获取连接 #### 指定URL和Properties获取连接
@ -222,19 +188,19 @@ public Connection getConn() throws Exception{
以上示例,建立一个到 hostname 为 taosdemo.com端口为 6030数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root密码password为 taosdata并在 connProps 中指定了使用的字符集、语言环境、时区等信息。 以上示例,建立一个到 hostname 为 taosdemo.com端口为 6030数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root密码password为 taosdata并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
properties 中的配置参数如下: properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_USER登录 TDengine 用户名,默认值 root。 * TSDBDriver.PROPERTY_KEY_USER登录 TDengine 用户名,默认值 'root'
* TSDBDriver.PROPERTY_KEY_PASSWORD用户登录密码默认值 taosdata。 * TSDBDriver.PROPERTY_KEY_PASSWORD用户登录密码默认值 'taosdata'
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg * TSDBDriver.PROPERTY_KEY_CONFIG_DIR客户端配置文件目录路径Linux OS 上默认值 `/etc/taos`Windows OS 上默认值 `C:/TDengine/cfg`
* TSDBDriver.PROPERTY_KEY_CHARSET客户端使用的字符集默认值为系统字符集。 * TSDBDriver.PROPERTY_KEY_CHARSET客户端使用的字符集默认值为系统字符集。
* TSDBDriver.PROPERTY_KEY_LOCALE客户端语言环境默认值系统当前 locale。 * TSDBDriver.PROPERTY_KEY_LOCALE客户端语言环境默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE客户端使用的时区默认值为系统当前时区。 * TSDBDriver.PROPERTY_KEY_TIME_ZONE客户端使用的时区默认值为系统当前时区。
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true在执行查询时批量拉取结果集false逐行拉取结果集。默认值为false。
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP'结果集中timestamp类型的字段为一个long值; 'UTC'结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING'结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNOREtrue在执行Statement的executeBatch时如果中间有一条sql执行失败继续执行下面的sq了。false不再执行失败sql后的任何语句。默认值为false。
#### 使用客户端配置文件建立连接 #### 使用客户端配置文件建立连接
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示 当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示
1. 在 Java 应用中不指定 hostname 和 port 1. 在 Java 应用中不指定 hostname 和 port
```java ```java
@ -251,7 +217,6 @@ public Connection getConn() throws Exception{
``` ```
2. 在配置文件中指定 firstEp 和 secondEp 2. 在配置文件中指定 firstEp 和 secondEp
``` ```
# first fully qualified domain name (FQDN) for TDengine system # first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030 firstEp cluster_node1:6030
@ -432,9 +397,9 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws
``` ```
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。 其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
### <a class="anchor" id="subscribe"></a>订阅 ## <a class="anchor" id="subscribe"></a>订阅
#### 创建 ### 创建
```java ```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
@ -448,7 +413,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
#### 消费数据 ### 消费数据
```java ```java
int total = 0; int total = 0;
@ -466,7 +431,7 @@ while(true) {
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 `consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
#### 关闭订阅 ### 关闭订阅
```java ```java
sub.close(true); sub.close(true);
@ -474,7 +439,7 @@ sub.close(true);
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 `close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
### 关闭资源 ## 关闭资源
```java ```java
resultSet.close(); resultSet.close();
@ -484,23 +449,10 @@ conn.close();
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 > `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
## 与连接池使用 ## 与连接池使用
**HikariCP** ### HikariCP
使用示例如下:
* 引入相应 HikariCP maven 依赖:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* 使用示例如下:
```java ```java
public static void main(String[] args) throws SQLException { public static void main(String[] args) throws SQLException {
@ -530,21 +482,10 @@ conn.close();
``` ```
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP) > 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)
**Druid** ### Druid
使用示例如下:
* 引入相应 Druid maven 依赖:
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* 使用示例如下:
```java ```java
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
@ -571,9 +512,9 @@ public static void main(String[] args) throws Exception {
} }
``` ```
> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid) > 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)
**注意事项** **注意事项**
* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。 * TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
如下所示,`select server_status()` 执行成功会返回 `1` 如下所示,`select server_status()` 执行成功会返回 `1`
@ -585,14 +526,20 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s) Query OK, 1 row(s) in set (0.000141s)
``` ```
## 在框架中使用 ## 在框架中使用
* Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
## 示例程序
示例程序源码位于TDengine/test/examples/JDBC下:
* JDBCDemoJDBC示例源程序
* JDBCConnectorCheckerJDBC安装校验源程序及jar包
* Springbootdemospringboot示例源程序
* SpringJdbcTemplateSpringJDBC模板
请参考:![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## 常见问题 ## 常见问题

View File

@ -58,7 +58,7 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
*connector*: 各种编程语言连接器go/grafanaplugin/nodejs/python/JDBC *connector*: 各种编程语言连接器go/grafanaplugin/nodejs/python/JDBC
*examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R) *examples*: 各种编程语言的示例程序(c/C#/go/JDBC/MATLAB/python/R)
运行install_client.sh进行安装 运行install_client.sh进行安装
**4. 配置taos.cfg** **4. 配置taos.cfg**
@ -95,9 +95,8 @@ TDengine提供了丰富的应用程序开发接口其中包括C/C++、Java、
**提示:** **提示:**
**1. 如利用FQDN连接服务器必须确认本机网络环境DNS已配置好或在hosts文件中添加FQDN寻址记录如编辑C:\Windows\system32\drivers\etc\hosts添加如下的记录** **192.168.1.99 h1.taos.com** 1. **如利用FQDN连接服务器必须确认本机网络环境DNS已配置好或在hosts文件中添加FQDN寻址记录如编辑C:\Windows\system32\drivers\etc\hosts添加如下的记录`192.168.1.99 h1.taos.com` **
2**卸载运行unins000.exe可卸载TDengine应用驱动。**
**2卸载运行unins000.exe可卸载TDengine应用驱动。**
### 安装验证 ### 安装验证
@ -408,11 +407,11 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
- `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))` - `TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *))`
该API用来创建数据流其中 该API用来创建数据流其中
* taos已经建立好的数据库连接 * taos已经建立好的数据库连接
* sqlSQL查询语句仅能使用查询语句 * sqlSQL查询语句仅能使用查询语句
* fp用户定义的回调函数指针每次流式计算完成后TDengine将查询的结果TAOS_ROW、查询状态TAOS_RES、用户定义参数PARAM传递给回调函数在回调函数内用户可以使用taos_num_fields获取结果集列数taos_fetch_fields获取结果集每列数据的类型。 * fp用户定义的回调函数指针每次流式计算完成后TDengine将查询的结果TAOS_ROW、查询状态TAOS_RES、用户定义参数PARAM传递给回调函数在回调函数内用户可以使用taos_num_fields获取结果集列数taos_fetch_fields获取结果集每列数据的类型。
* stime是流式计算开始的时间。如果是“64位整数最小值”表示从现在开始如果不为“64位整数最小值”表示从指定的时间开始计算UTC时间从1970/1/1算起的毫秒数 * stime是流式计算开始的时间。如果是“64位整数最小值”表示从现在开始如果不为“64位整数最小值”表示从指定的时间开始计算UTC时间从1970/1/1算起的毫秒数
* param是应用提供的用于回调的一个参数回调时提供给应用 * param是应用提供的用于回调的一个参数回调时提供给应用
* callback: 第二个回调函数,会在连续查询自动停止时被调用。 * callback: 第二个回调函数,会在连续查询自动停止时被调用。
返回值为NULL表示创建失败返回值不为空表示成功。 返回值为NULL表示创建失败返回值不为空表示成功。
@ -458,7 +457,6 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
<!-- REPLACE_OPEN_TO_ENTERPRISE__JAVA_CONNECTOR_DOC --> <!-- REPLACE_OPEN_TO_ENTERPRISE__JAVA_CONNECTOR_DOC -->
## <a class="anchor" id="python"></a>Python Connector ## <a class="anchor" id="python"></a>Python Connector
Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html) Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1963.html)
@ -513,13 +511,12 @@ python -m pip install .
- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。 - 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。
- 通过游标对象的execute()方法执行写入或查询的SQL语句 - 通过游标对象的execute()方法执行写入或查询的SQL语句
- 如果执行的是写入语句execute返回的是成功写入的行数信息affected rows - 如果执行的是写入语句execute返回的是成功写入的行数信息affected rows
- 如果执行的是查询语句则execute执行成功后需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。 - 如果执行的是查询语句则execute执行成功后需要通过fetchall方法去拉取结果集。 具体方法可以参考示例代码。
### 安装验证 ### 安装验证
运行如下指令: 运行如下指令:
@ -531,7 +528,6 @@ python3 PythonChecker.py -host <fqdn>
验证通过将打印出成功信息。 验证通过将打印出成功信息。
### Python连接器的使用 ### Python连接器的使用
#### 代码示例 #### 代码示例
@ -649,8 +645,8 @@ conn.close()
- 通过taos.connect获取TDengineConnection对象这个对象可以一个程序只申请一个在多线程中共享。 - 通过taos.connect获取TDengineConnection对象这个对象可以一个程序只申请一个在多线程中共享。
- 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。 - 通过TDengineConnection对象的 .cursor()方法获取一个新的游标对象,这个游标对象必须保证每个线程独享。
- 通过游标对象的execute()方法执行写入或查询的SQL语句 - 通过游标对象的execute()方法执行写入或查询的SQL语句
- 如果执行的是写入语句execute返回的是成功写入的行数信息affected rows - 如果执行的是写入语句execute返回的是成功写入的行数信息affected rows
- 如果执行的是查询语句则execute执行成功后需要通过fetchall方法去拉取结果集。 - 如果执行的是查询语句则execute执行成功后需要通过fetchall方法去拉取结果集。
具体方法可以参考示例代码。 具体方法可以参考示例代码。
@ -888,7 +884,7 @@ HTTP请求URL采用`sqlutc`时返回结果集的时间戳将采用UTC时间
### 重要配置项 ### 重要配置项
下面仅列出一些与RESTful接口有关的配置参数其他系统参数请看配置文件里的说明。注意配置修改后需要重启taosd服务才能生效 下面仅列出一些与RESTful接口有关的配置参数其他系统参数请看配置文件里的说明。注意配置修改后需要重启taosd服务才能生效
- 对外提供RESTful服务的端口号默认绑定到 6041实际取值是 serverPort + 11因此可以通过修改 serverPort 参数的设置来修改) - 对外提供RESTful服务的端口号默认绑定到 6041实际取值是 serverPort + 11因此可以通过修改 serverPort 参数的设置来修改)
- httpMaxThreads: 启动的线程数量默认为22.0.17.0版本开始默认值改为CPU核数的一半向下取整 - httpMaxThreads: 启动的线程数量默认为22.0.17.0版本开始默认值改为CPU核数的一半向下取整
@ -927,7 +923,7 @@ C#Checker.exe -h <fqdn>
在Windows系统上C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示: 在Windows系统上C#应用程序可以使用TDengine的C#连接器接口来执行所有数据库的操作。使用的具体步骤如下所示:
1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。 1. 将接口文件TDengineDrivercs.cs加入到应用程序所在的项目空间中。
2. 用户可以参考TDengineTest.cs来定义数据库连接参数以及如何执行数据插入、查询等操作 2. 用户可以参考TDengineTest.cs来定义数据库连接参数以及如何执行数据插入、查询等操作
此接口需要用到taos.dll文件所以在执行应用程序前拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件即可访问TDengine数据库并做插入、查询等操作。 此接口需要用到taos.dll文件所以在执行应用程序前拷贝Windows客户端install_directory/driver目录中的taos.dll文件到项目最后生成.exe可执行文件所在的文件夹。之后运行exe文件即可访问TDengine数据库并做插入、查询等操作。
@ -960,23 +956,27 @@ Go连接器支持的系统有
安装前准备: 安装前准备:
- 已安装好TDengine应用驱动参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) - 已安装好TDengine应用驱动参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)
### 示例程序 ### 示例程序
使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 使用 Go 连接器的示例代码请参考 https://github.com/taosdata/TDengine/tree/develop/tests/examples/go 以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。
示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中 示例程序源码也位于安装目录下的 examples/go/taosdemo.go 文件中
**提示建议Go版本是1.13及以上,并开启模块支持:** **提示建议Go版本是1.13及以上,并开启模块支持:**
```sh ```sh
go env -w GO111MODULE=on go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.io,direct go env -w GOPROXY=https://goproxy.io,direct
``` ```
在taosdemo.go所在目录下进行编译和执行 在taosdemo.go所在目录下进行编译和执行
```sh ```sh
go mod init *demo* go mod init taosdemo
go build ./demo -h fqdn -p serverPort go get github.com/taosdata/driver-go/taosSql
# use win branch in Windows platform.
#go get github.com/taosdata/driver-go/taosSql@win
go build
./taosdemo -h fqdn -p serverPort
``` ```
### Go连接器的使用 ### Go连接器的使用
@ -1035,7 +1035,7 @@ Node.js连接器支持的系统有
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux | | **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** | | **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html) Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1957.html)
### 安装准备 ### 安装准备
@ -1045,14 +1045,14 @@ Node.js连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020
用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下: 用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。具体安装步骤如下:
首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. 首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器
```bash ```bash
npm install td2.0-connector npm install td2.0-connector
``` ```
我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 我们建议用户使用npm 安装node.js连接器。如果您没有安装npm可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下
我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js 连接器之前,还需安装以下软件: 我们使用[node-gyp](https://github.com/nodejs/node-gyp)和TDengine服务端进行交互。安装node.js连接器之前,还需要根据具体操作系统来安装下文提到的一些依赖工具。
### Linux ### Linux
@ -1065,17 +1065,17 @@ npm install td2.0-connector
#### 安装方法1 #### 安装方法1
使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具 使用微软的[windows-build-tools](https://github.com/felixrieseberg/windows-build-tools)在`cmd` 命令行界面执行`npm install --global --production windows-build-tools` 即可安装所有的必备工具
#### 安装方法2 #### 安装方法2
手动安装以下工具: 手动安装以下工具
- 安装Visual Studio相关[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) - 安装Visual Studio相关[Visual Studio Build 工具](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) 或者 [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7` - 安装 [Python](https://www.python.org/downloads/) 2.7(`v3.x.x` 暂不支持) 并执行 `npm config set python python2.7`
- 进入`cmd`命令行界面,`npm config set msvs_version 2017` - 进入`cmd`命令行界面,`npm config set msvs_version 2017`
如果以上步骤不能成功执行可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) 如果以上步骤不能成功执行可以参考微软的node.js用户手册[Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
如果在Windows 10 ARM 上使用ARM64 Node.js还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。 如果在Windows 10 ARM 上使用ARM64 Node.js还需添加 "Visual C++ compilers and libraries for ARM64" 和 "Visual C++ ATL for ARM64"。
@ -1148,7 +1148,7 @@ TDengine目前还不支持update和delete语句。
var query = cursor.query('show databases;') var query = cursor.query('show databases;')
``` ```
查询的结果可以通过 `query.execute()` 函数获取并打印出来 查询的结果可以通过 `query.execute()` 函数获取并打印出来
```javascript ```javascript
var promise = query.execute(); var promise = query.execute();
@ -1196,6 +1196,6 @@ promise2.then(function(result) {
### 示例 ### 示例
[node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例 [node-example.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js)提供了一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例
[node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`. [node-example-raw.js](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`

View File

@ -12,7 +12,7 @@ TDengine的集群管理极其简单除添加和删除节点需要人工干预
**第零步**规划集群所有物理节点的FQDN将规划好的FQDN分别添加到每个物理节点的/etc/hostname修改每个物理节点的/etc/hosts将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS请联系网络管理员在DNS上做好相关配置】 **第零步**规划集群所有物理节点的FQDN将规划好的FQDN分别添加到每个物理节点的/etc/hostname修改每个物理节点的/etc/hosts将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS请联系网络管理员在DNS上做好相关配置】
**第一步**如果搭建集群的物理节点中存有之前的测试数据、装过1.X的版本或者装过其他版本的TDengine请先将其删除并清空所有数据如果需要保留原有数据请联系涛思交付团队进行旧版本升级、数据迁移具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) **第一步**如果搭建集群的物理节点中存有之前的测试数据、装过1.X的版本或者装过其他版本的TDengine请先将其删除并清空所有数据如果需要保留原有数据请联系涛思交付团队进行旧版本升级、数据迁移具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)
**注意1**因为FQDN的信息会写进文件如果之前没有配置或者更改FQDN且启动了TDengine。请一定在确保数据无用或者备份的前提下清理一下之前的数据`rm -rf /var/lib/taos/*` **注意1**因为FQDN的信息会写进文件如果之前没有配置或者更改FQDN且启动了TDengine。请一定在确保数据无用或者备份的前提下清理一下之前的数据`rm -rf /var/lib/taos/*`
**注意2**客户端也需要配置确保它可以正确解析每个节点的FQDN配置不管是通过DNS服务还是 Host 文件。 **注意2**客户端也需要配置确保它可以正确解析每个节点的FQDN配置不管是通过DNS服务还是 Host 文件。
@ -25,7 +25,7 @@ TDengine的集群管理极其简单除添加和删除节点需要人工干预
1. 每个物理节点上执行命令`hostname -f`查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查) 1. 每个物理节点上执行命令`hostname -f`查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查)
2. 每个物理节点上执行`ping host`其中host是其他物理节点的hostname看能否ping通其它物理节点如果不能ping通需要检查网络设置或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts)或DNS的配置。如果无法ping通是无法组成集群的 2. 每个物理节点上执行`ping host`其中host是其他物理节点的hostname看能否ping通其它物理节点如果不能ping通需要检查网络设置或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts)或DNS的配置。如果无法ping通是无法组成集群的
3. 从应用运行的物理节点ping taosd运行的数据节点如果无法ping通应用是无法连接taosd的请检查应用所在物理节点的DNS设置或hosts文件 3. 从应用运行的物理节点ping taosd运行的数据节点如果无法ping通应用是无法连接taosd的请检查应用所在物理节点的DNS设置或hosts文件
4. 每个数据节点的End Point就是输出的hostname外加端口号比如h1.taosdata.com:6030 4. 每个数据节点的End Point就是输出的hostname外加端口号比如`h1.taosdata.com:6030`。
**第五步**修改TDengine的配置文件所有节点的文件/etc/taos/taos.cfg都需要修改。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030其与集群配置相关参数如下 **第五步**修改TDengine的配置文件所有节点的文件/etc/taos/taos.cfg都需要修改。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030其与集群配置相关参数如下

View File

@ -73,7 +73,7 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
因为 TDengine 具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。 因为 TDengine 具有很好的水平扩展能力,根据总量,再根据单个物理机或虚拟机的资源,就可以轻松决定需要购置多少台物理机或虚拟机了。
**立即计算 CPU、内存、存储请参见[资源估算方法](https://www.taosdata.com/config/config.html)** **立即计算 CPU、内存、存储请参见[资源估算方法](https://www.taosdata.com/config/config.html)**
## <a class="anchor" id="tolerance"></a>容错和灾备 ## <a class="anchor" id="tolerance"></a>容错和灾备
@ -217,7 +217,7 @@ taosd -C
| 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。2.0.15 以前的版本中,此参数的单位是字节) | | 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。2.0.15 以前的版本中,此参数的单位是字节) |
| 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程最大值2表示最大建立2倍CPU核数的查询线程。默认为1表示最大和CPU核数相等的查询线程。该值可以为小数即0.5表示最大建立CPU核数一半的查询线程。 | | 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程最大值2表示最大建立2倍CPU核数的查询线程。默认为1表示最大和CPU核数相等的查询线程。该值可以为小数即0.5表示最大建立CPU核数一半的查询线程。 |
| 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 | | 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 |
| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0关闭1缓存子表最近一行数据2缓存子表每一列的最近的非NULL值3同时打开缓存最近行和列功能。 | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 | | 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0关闭1缓存子表最近一行数据2缓存子表每一列的最近的非NULL值3同时打开缓存最近行和列功能。2.1.2.0 版本开始此参数支持 03 的取值范围,在此之前取值只能是 [0, 1] | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
| 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | | | 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | |
| 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 | | 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 |
@ -230,7 +230,7 @@ taosd -C
| 1 | days | 天 | 一个数据文件存储数据的时间跨度 | | 10 | | 1 | days | 天 | 一个数据文件存储数据的时间跨度 | | 10 |
| 2 | keep | 天 | (可通过 alter database 修改<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION_IN_PARAM_LIST -->)数据库中数据保留的天数。 | 3650 | | 2 | keep | 天 | (可通过 alter database 修改<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION_IN_PARAM_LIST -->)数据库中数据保留的天数。 | 3650 |
| 3 | cache | MB | 内存块的大小 | | 16 | | 3 | cache | MB | 内存块的大小 | | 16 |
| 4 | blocks | | (可通过 alter database 修改)每个 VNODETSDB中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为cache * blocks。 | | 4 | | 4 | blocks | | (可通过 alter database 修改)每个 VNODETSDB中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为cache * blocks。 | | 6 |
| 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 | | 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 |
| 6 | minRows | | 文件块中记录的最小条数 | | 100 | | 6 | minRows | | 文件块中记录的最小条数 | | 100 |
| 7 | maxRows | | 文件块中记录的最大条数 | | 4096 | | 7 | maxRows | | 文件块中记录的最大条数 | | 4096 |
@ -375,7 +375,7 @@ taos -C 或 taos --dump-config
timezone GMT-8 timezone GMT-8
timezone Asia/Shanghai timezone Asia/Shanghai
``` ```
均是合法的设置东八区时区的格式。 均是合法的设置东八区时区的格式。但需注意Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容时间戳字符串、关键词now的解析产生影响。例如 时区的设置对于查询和写入SQL语句中非Unix时间戳的内容时间戳字符串、关键词now的解析产生影响。例如
```sql ```sql
@ -433,7 +433,7 @@ SHOW USERS;
显示所有用户 显示所有用户
**注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 **注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身
## <a class="anchor" id="import"></a>数据导入 ## <a class="anchor" id="import"></a>数据导入
@ -445,7 +445,7 @@ TDengine的shell支持source filename命令用于批量运行文件中的SQL
**按数据文件导入** **按数据文件导入**
TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同, 在导入的时候,其语法如下 TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。CSV文件只属于一张表且CSV文件中的数据格式需与要导入表的结构相同,在导入的时候,其语法如下:
```mysql ```mysql
insert into tb1 file 'path/data.csv'; insert into tb1 file 'path/data.csv';
@ -487,7 +487,7 @@ Query OK, 9 row(s) affected (0.004763s)
**taosdump工具导入** **taosdump工具导入**
TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据导入到其他系统中。具体使用方法请参见博客[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据导入到其他系统中。具体使用方法请参见博客[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)
## <a class="anchor" id="export"></a>数据导出 ## <a class="anchor" id="export"></a>数据导出
@ -627,7 +627,7 @@ Active: inactive (dead)
...... ......
``` ```
卸载 TDengine只需要执行如下命令 卸载 TDengine只需要执行如下命令
``` ```
rmtaos rmtaos
``` ```
@ -724,7 +724,7 @@ rmtaos
2. 服务端命令行输入:`taos -n server -P <port>` 以服务端身份启动对端口 port 为基准端口的监听 2. 服务端命令行输入:`taos -n server -P <port>` 以服务端身份启动对端口 port 为基准端口的监听
3. 客户端命令行输入:`taos -n client -h <fqdn of server> -P <port>` 以客户端身份启动对指定的服务器、指定的端口发送测试包 3. 客户端命令行输入:`taos -n client -h <fqdn of server> -P <port>` 以客户端身份启动对指定的服务器、指定的端口发送测试包
服务端运行正常的话会输出以下信息 服务端运行正常的话会输出以下信息
```bash ```bash
# taos -n server -P 6000 # taos -n server -P 6000
@ -800,7 +800,7 @@ taos -n sync -P 6042 -h <fqdn of server>
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP` `taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
从 2.1.7.0 版本开始taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: 从 2.1.8.0 版本开始taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
-n设为“speed”时表示对网络速度进行诊断。 -n设为“speed”时表示对网络速度进行诊断。
-h所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 -h所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
@ -809,6 +809,15 @@ taos -n sync -P 6042 -h <fqdn of server>
-l单个网络包的大小单位字节。最小值是 1024、最大值是 1024*1024*1024默认值为 1000。 -l单个网络包的大小单位字节。最小值是 1024、最大值是 1024*1024*1024默认值为 1000。
-S网络封包的类型。可以是 TCP 或 UDP默认值为 TCP。 -S网络封包的类型。可以是 TCP 或 UDP默认值为 TCP。
#### FQDN 解析速度诊断
`taos -n fqdn -h <fqdn of server>`
从 2.1.8.0 版本开始taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
-n设为“fqdn”时表示对 FQDN 解析进行诊断。
-h所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
#### 服务端日志 #### 服务端日志
taosd 服务端日志文件标志位 debugflag 默认为 131在 debug 时往往需要将其提升到 135 或 143 。 taosd 服务端日志文件标志位 debugflag 默认为 131在 debug 时往往需要将其提升到 135 或 143 。

View File

@ -9,7 +9,7 @@ TAOS SQL 不支持关键字的缩写,例如 DESCRIBE 不能缩写为 DESC。
本章节 SQL 语法遵循如下约定: 本章节 SQL 语法遵循如下约定:
- < > 里的内容是用户需要输入的,但不要输入 <> 本身 - < > 里的内容是用户需要输入的,但不要输入 <> 本身
- [ ] 表示内容为可选项,但不能输入 [] 本身 - \[ \] 表示内容为可选项,但不能输入 [] 本身
- | 表示多选一,选择其中一个即可,但不能输入 | 本身 - | 表示多选一,选择其中一个即可,但不能输入 | 本身
- … 表示前面的项可重复多个 - … 表示前面的项可重复多个
@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
显示当前数据库下的所有数据表信息。 显示当前数据库下的所有数据表信息。
说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
通配符匹配1'%'百分号匹配0到任意个字符2'\_'下划线匹配单个任意字符。
- **显示一个数据表的创建语句** - **显示一个数据表的创建语句**
```mysql ```mysql
@ -265,7 +261,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
```mysql ```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]); CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
``` ```
创建 STable与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型 创建 STable与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型
说明: 说明:
@ -718,27 +714,19 @@ Query OK, 1 row(s) in set (0.001091s)
| = | equal to | all types | | = | equal to | all types |
| <> | not equal to | all types | | <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types | | between and | within a certain range | **`timestamp`** and all numeric types |
| in | matches any value in a set | all types except first column `timestamp` | | in | match any value in a set | all types except first column `timestamp` |
| like | match a wildcard string | **`binary`** **`nchar`** |
| % | match with any char sequences | **`binary`** **`nchar`** | | % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** | | _ | match with a single char | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 2. like 算子使用通配符字符串进行匹配检查。
3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))` * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。
4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明BOOL 类型写作 `{true, false}``{0, 1}` 均可,但不能写作 0、1 之外的整数FLOAT 和 DOUBLE 类型会受到浮点数精度影响集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER --> 3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`
<!-- 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
<a class="anchor" id="having"></a> 6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明BOOL 类型写作 `{true, false}``{0, 1}` 均可,但不能写作 0、1 之外的整数FLOAT 和 DOUBLE 类型会受到浮点数精度影响集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
### GROUP BY 之后的 HAVING 过滤
从 2.0.20.0 版本开始GROUP BY 之后允许再跟一个 HAVING 子句对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW
例如,如下语句只会输出 `AVG(f1) > 0` 的分组:
```mysql
SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;
```
-->
<a class="anchor" id="union"></a> <a class="anchor" id="union"></a>
### UNION ALL 操作符 ### UNION ALL 操作符
@ -1025,9 +1013,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1如果要返回各个列的首个时间戳最小非NULL值可以使用FIRST(\*) 1如果要返回各个列的首个时间戳最小非NULL值可以使用FIRST(\*)
2) 如果结果集中的某列全部为NULL值则该列的返回结果也是NULL 2如果结果集中的某列全部为NULL值则该列的返回结果也是NULL
3) 如果结果集中所有列全部为NULL值则不返回结果。 3如果结果集中所有列全部为NULL值则不返回结果。
示例: 示例:
```mysql ```mysql
@ -1187,7 +1175,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。 适用于:**表、超级表**。
说明:*P*值取值范围0≤*P*≤100为0的时候等同于MIN为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数 说明:*P*值取值范围0≤*P*≤100为0的时候等同于MIN为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
```mysql ```mysql
taos> SELECT APERCENTILE(current, 20) FROM d1001; taos> SELECT APERCENTILE(current, 20) FROM d1001;
@ -1209,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。 适用于:**表、超级表**。
说明与LAST函数不同LAST_ROW不支持时间范围限制强制返回最后一条记录。
限制LAST_ROW()不能与INTERVAL一起使用。 限制LAST_ROW()不能与INTERVAL一起使用。
示例: 示例:
@ -1297,6 +1283,19 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
说明:(从 2.1.3.0 版本开始新增此函数输出结果行数是范围内总行数减一第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname 说明:(从 2.1.3.0 版本开始新增此函数输出结果行数是范围内总行数减一第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname
示例:
```mysql
taos> select derivative(current, 10m, 0) from t1;
ts | derivative(current, 10m, 0) |
========================================================
2021-08-20 10:11:22.790 | 0.500000000 |
2021-08-20 11:11:22.791 | 0.166666620 |
2021-08-20 12:11:22.791 | 0.000000000 |
2021-08-20 13:11:22.792 | 0.166666620 |
2021-08-20 14:11:22.792 | -0.666666667 |
Query OK, 5 row(s) in set (0.004883s)
```
- **SPREAD** - **SPREAD**
```mysql ```mysql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
@ -1416,13 +1415,13 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
## <a class="anchor" id="limitation"></a>TAOS SQL 边界限制 ## <a class="anchor" id="limitation"></a>TAOS SQL 边界限制
- 数据库名最大长度为 32 - 数据库名最大长度为 32
- 表名最大长度为 192每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) - 表名最大长度为 192每行数据最大长度 16k 个字符(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
- 列名最大长度为 64最多允许 1024 列,最少需要 2 列,第一列必须是时间戳 - 列名最大长度为 64最多允许 1024 列,最少需要 2 列,第一列必须是时间戳
- 标签名最大长度为 64最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符 - 标签名最大长度为 64最多允许 128 个,可以 1 个,一个表中标签值的总长度不超过 16k 个字符
- SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M - SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M
- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 - SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
## TAOS SQL其他约定 ## TAOS SQL其他约定

View File

@ -26,15 +26,15 @@
## 2. Windows平台下JDBCDriver找不到动态链接库怎么办 ## 2. Windows平台下JDBCDriver找不到动态链接库怎么办
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html) 请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)
## 3. 创建数据表时提示more dnodes are needed ## 3. 创建数据表时提示more dnodes are needed
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html) 请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)
## 4. 如何让TDengine crash时生成core文件 ## 4. 如何让TDengine crash时生成core文件
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html) 请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)
## 5. 遇到错误“Unable to establish connection”, 我怎么办? ## 5. 遇到错误“Unable to establish connection”, 我怎么办?
@ -49,7 +49,7 @@
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* 3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得),FQDN配置参考[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name —— 可在服务器上执行Linux命令hostname -f获得FQDN配置参考[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
5. ping服务器FQDN如果没有反应请检查你的网络DNS设置或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群客户端需要能ping通所有集群节点的FQDN。 5. ping服务器FQDN如果没有反应请检查你的网络DNS设置或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群客户端需要能ping通所有集群节点的FQDN。
@ -74,16 +74,16 @@
产生这个错误是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用请做如下检查 产生这个错误是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用请做如下检查
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) 1. 请检查连接的服务器的FQDN是否正确FQDN配置参考[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
2. 如果网络配置有DNS server, 请检查是否正常工作 2. 如果网络配置有DNS server请检查是否正常工作
3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件查看该FQDN是否配置并是否有正确的IP地址 3. 如果网络没有配置DNS server请检查客户端所在机器的hosts文件查看该FQDN是否配置并是否有正确的IP地址
4. 如果网络配置OK从客户端所在机器你需要能Ping该连接的FQDN否则客户端是无法连接服务器的 4. 如果网络配置OK从客户端所在机器你需要能Ping该连接的FQDN否则客户端是无法连接服务器的
## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误 ## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误
如果你确认语法正确2.0之前版本请检查SQL语句长度是否超过64K。如果超过也会返回这个错误。 如果你确认语法正确2.0之前版本请检查SQL语句长度是否超过64K。如果超过也会返回这个错误。
## 8. 是否支持validation queries? ## 8. 是否支持validation queries
TDengine还没有一组专用的validation queries。然而建议你使用系统监测的数据库”log"来做。 TDengine还没有一组专用的validation queries。然而建议你使用系统监测的数据库”log"来做。
@ -137,7 +137,7 @@ Connection = DriverManager.getConnection(url, properties);
TDengine是根据hostname唯一标志一台机器的在数据文件从机器A移动机器B时注意如下两件事 TDengine是根据hostname唯一标志一台机器的在数据文件从机器A移动机器B时注意如下两件事
- 2.0.0.0 至 2.0.6.x 的版本重新配置机器B的hostname为机器A的hostname - 2.0.0.0 至 2.0.6.x 的版本重新配置机器B的hostname为机器A的hostname
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下修复dnodeEps.json的dnodeId对应的FQDN重启。确保机器内所有机器的此文件是完全相同的。 - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下修复dnodeEps.json的dnodeId对应的FQDN重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。

View File

@ -71,7 +71,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Connector](/connector) ## [Connector](/connector)
- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library - [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API - [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications - [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP - [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications - [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications

View File

@ -16,9 +16,7 @@ Please visit our [TDengine Official Docker Image: Distribution, Downloading, and
It's extremely easy to install TDengine, taking only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs: It's extremely easy to install TDengine, taking only a few seconds from download to successful installation. The server installation package includes clients and connectors. We provide 3 installation packages, which you can choose according to actual needs:
Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package. Click [here](https://www.taosdata.com/en/getting-started/#Install-from-Package) to download the install package.
For more about installation process, please refer [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html), and [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).
## <a class="anchor" id="start"></a>Quick Launch ## <a class="anchor" id="start"></a>Quick Launch

View File

@ -9,7 +9,7 @@ Please watch the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1945.
Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. For example: Different types of data collection points often have different data characteristics, including frequency of data collecting, length of data retention time, number of replicas, size of data blocks, whether to update data or not, and so on. To ensure TDengine working with great efficiency in various scenarios, TDengine suggests creating tables with different data characteristics in different databases, because each database can be configured with different storage strategies. When creating a database, in addition to SQL standard options, the application can also specify a variety of parameters such as retention duration, number of replicas, number of memory blocks, time accuracy, max and min number of records in a file block, whether it is compressed or not, and number of days a data file will be overwritten. For example:
```mysql ```mysql
CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 6 UPDATE 1;
``` ```
The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 4 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management). The above statement will create a database named “power”. The data of this database will be kept for 365 days (it will be automatically deleted 365 days later), one data file created per 10 days, and the number of memory blocks is 6 for data updating. For detailed syntax and parameters, please refer to [Data Management section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#management).

View File

@ -0,0 +1,525 @@
# Java connector
## Introduction
The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (supported from taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally.
![tdengine-connector](page://images/tdengine-jdbc-connector.png)
The figure above shows the three ways Java applications can access TDengine:
* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write or query requests to the taosd instance on physical node2 (pnode2).
* RESTful: The Java application sends the SQL to the RESTful connector on physical node2 (pnode2), which then calls the client API (libtaos.so).
* JDBC-RESTful: The Java application uses the JDBC-restful API to encapsulate SQL into a RESTful request and send it to the RESTful connector of physical node 2.
In terms of implementation, the JDBC driver of TDengine is as consistent as possible with the behavior of the relational database driver. However, due to the differences between TDengine and relational database in the object and technical characteristics of services, there are some differences between taos-jdbcdriver and traditional relational database JDBC driver. The following points should be watched:
* deleting a record is not supported in TDengine.
* transaction is not supported in TDengine.
### Difference between JDBC-JNI and JDBC-restful
<table>
<tr align="center"><th>Difference</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>Supported OS</td>
<td>linux、windows</td>
<td>all platform</td>
</tr>
<tr align="center">
<td>Whether to install the Client</td>
<td>need</td>
<td>do not need</td>
</tr>
<tr align="center">
<td>Whether to upgrade the client after the server is upgraded</td>
<td>need</td>
<td>do not need</td>
</tr>
<tr align="center">
<td>Write performance</td>
<td colspan="2">JDBC-RESTful is 50% to 90% of JDBC-JNI</td>
</tr>
<tr align="center">
<td>Read performance</td>
<td colspan="2">JDBC-RESTful is no different from JDBC-JNI</td>
</tr>
</table>
**Note**: RESTful interfaces are stateless. Therefore, when using JDBC-restful, you should specify the database name in SQL before all table names and super table names, for example:
```sql
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
```
## JDBC driver version and supported TDengine and JDK versions
| taos-jdbcdriver | TDengine | JDK |
| -------------------- | ----------------- | -------- |
| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x |
| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## DataType in TDengine and Java connector
The TDengine supports the following data types and Java data types:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte[] |
| NCHAR | java.lang.String |
## Install Java connector
### Runtime Requirements
To run TDengine's Java connector, the following requirements shall be met:
1. A Linux or Windows System
2. Java Runtime Environment 1.8 or later
3. TDengine client (required for JDBC-JNI, not required for JDBC-restful)
**Note**:
* After the TDengine client is successfully installed on Linux, the libtaos.so file is automatically copied to /usr/lib/libtaos.so, which is included in the Linux automatic scan path and does not need to be specified separately.
* After the TDengine client is installed on Windows, the taos.dll file that the driver package depends on is automatically copied to the default search path C:/Windows/System32. You do not need to specify it separately.
### Obtain JDBC driver by maven
To Java developers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver). Add the following dependency in pom.xml for your Maven projects.
```xml
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.34</version>
</dependency>
</dependencies>
```
### Obtain JDBC driver by compiling source code
You can download the TDengine source code and compile the latest version of the JDBC Connector.
```shell
git clone https://github.com/taosdata/TDengine.git
cd TDengine/src/connector/jdbc
mvn clean package -Dmaven.test.skip=true
```
A taos-jdbcdriver-2.0.xx-dist.jar will be generated in the target directory.
## Usage of java connector
### Establishing a Connection
#### Establishing a connection with URL
Establish the connection by specifying the URL, as shown below:
```java
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
In the example above, the JDBC-RESTful driver is used to establish a connection to the hostname of 'taosdemo.com', port of 6041, and database name of 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'.
The JDBC-RESTful does not depend on the local function library. Compared with JDBC-JNI, only the following is required:
* DriverClass designated as "com.taosdata.jdbc.rs.RestfulDriver"
* JdbcUrl starts with "JDBC:TAOS-RS://"
* Use port 6041 as the connection port
For better write and query performance, Java applications can use the JDBC-JNI driver, as shown below:
```java
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
In the example above, The JDBC-JNI driver is used to establish a connection to the hostname of 'taosdemo.com', port 6030 (TDengine's default port), and database name of 'test'. This URL specifies the user name as 'root' and the password as 'taosdata'.
<!-- You can also see the JDBC-JNI video tutorial: [JDBC connector of TDengine](https://www.taosdata.com/blog/2020/11/11/1955.html) -->
The format of JDBC URL is:
```url
jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]
```
The configuration parameters in the URL are as follows:
* user: user name for logging in to the TDengine. The default value is 'root'.
* password: the user login password. The default value is 'taosdata'.
* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
* charset: character set used by the client. The default value is the system character set.
* locale: client locale. The default value is the current system locale.
* timezone: timezone used by the client. The default value is the current timezone of the system.
* batchfetch: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is false.
* timestampFormat: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
* batchErrorIgnore: true if you want to continue executing the remaining SQL statements when an error occurs during execution of the executeBatch method in Statement; false if the remaining SQL statements are not executed after an error. Default value is false.
#### Establishing a connection with URL and Properties
In addition to establish the connection with the specified URL, you can also use Properties to specify the parameters to set up the connection, as shown below:
```java
public Connection getConn() throws Exception{
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
In the example above, JDBC-JNI is used to establish a connection to hostname of 'taosdemo.com', port at 6030, and database name of 'test'. The commented-out line shows the corresponding URL for JDBC-RESTful. The connection specifies the user name as 'root' and the password as 'taosdata' in the URL, and the character set, locale, time zone, and so on in connProps.
The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to the TDengine. The default value is 'root'.
* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. The default value is 'taosdata'.
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
* TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client. The default value is the system character set.
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system.
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is false.
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'.
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL statements when an error happens during the execution of the executeBatch method in Statement; false if the remaining SQL statements are not executed after an error. Default value is false.
#### Establishing a connection with configuration file
When JDBC-JNI is used to connect to the TDengine cluster, you can specify firstEp and secondEp parameters of the cluster in the client configuration file. As follows:
1. The hostname and port are not specified in Java applications
```java
public Connection getConn() throws Exception{
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
2. Specify firstEp and secondEp in the configuration file
```txt
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
```
In the above example, JDBC driver uses the client configuration file to establish a connection to the hostname of 'cluster_node1', port 6030, and database name of 'test'. When the firstEp node in the cluster fails, JDBC will try to connect to the cluster using secondEp. In the TDengine, as long as one node in firstEp and secondEp is valid, the connection to the cluster can be established.
**Note**: In this case, the configuration file belongs to the TDengine client running inside the Java application. The default file path is '/etc/taos/taos.cfg' on Linux and 'C:/TDengine/cfg/taos.cfg' on Windows.
#### Priority of the parameters
If the parameters in the URL, Properties, and client configuration file are set repeatedly, the priorities of the parameters in descending order are as follows:
1. URL parameters
2. Properties
3. Client configuration file in taos.cfg
For example, if you specify password as 'taosdata' in the URL and password as 'taosdemo' in the Properties, JDBC will establish a connection using the password in the URL.
For details, see Client Configuration:[client configuration](https://www.taosdata.com/en/documentation/administrator#client)
### Create database and table
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
### Insert
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
**Note**: 'now' is an internal system function. The default value is the current time of the computer where the client resides. 'now + 1s' indicates that the current time on the client is added by one second. The following time units are a(millisecond), s (second), m(minute), h(hour), d(day), w(week), n(month), and y(year).
### Query
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
**Note**: The query is consistent with the operation of the relational database, and the index in ResultSet starts from 1.
### Handle exceptions
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
The Java connector may report three types of error codes: JDBC Driver (error codes ranging from 0x2301 to 0x2350), JNI method (error codes ranging from 0x2351 to 0x2400), and TDengine Error. For details about the error code, see:
- https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
- https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
### Write data through parameter binding
Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving the write performance. (**Note**: parameter binding is not supported in JDBC-RESTful)
```java
Statement stmt = conn.createStatement();
Random r = new Random();
// In the INSERT statement, the VALUES clause allows you to specify a specific column; If automatic table creation is adopted, the TAGS clause needs to set the parameter values of all TAGS columns
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
s.setTableName("w1");
// set tags
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
// set values
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s1 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s1.add(r.nextInt(100));
}
s.setInt(1, s1);
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s2.add("test" + r.nextInt(100));
}
s.setString(2, s2, 10);
// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch
s.columnDataAddBatch();
s.columnDataExecuteBatch();
// Clear the cache, after which you can bind new data(including table names, tags, values):
s.columnDataClearBatch();
s.columnDataCloseBatch();
```
The methods used to set tags are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
The methods used to set columns are:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
**Note**: Both setString and setNString require the user to declare the column width of the corresponding column in the table definition in the size parameter.
### Data Subscription
#### Subscribe
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
```
parameters:
* topic: the unique topic name of the subscription.
* sql: a select statement.
* restart: true to restart the subscription if it already exists; false to continue the previous subscription.
In the example above, a subscription named 'topic' is created which uses the SQL statement 'select * from meters'. If the subscription already exists, it will continue with the previous query progress, rather than consuming all the data from scratch.
#### Consume
```java
int total = 0;
while(true) {
TSDBResultSet rs = sub.consume();
int count = 0;
while(rs.next()) {
count++;
}
total += count;
System.out.printf("%d rows consumed, total %d\n", count, total);
Thread.sleep(1000);
}
```
The consume method returns a result set containing all the new data so far since the last consume. Make sure to call consume as often as you need (like Thread.sleep(1000) in the example), otherwise you will put unnecessary stress on the server.
#### Close
```java
sub.close(true);
// release resources
resultSet.close();
stmt.close();
conn.close();
```
The close method closes a subscription. If the parameter is true, the subscription progress information is reserved, and a subscription with the same name can be created later to continue consuming data. If false, the subscription progress is not retained.
**Note**: the connection must be closed; otherwise, a connection leak may occur.
## Connection Pool
### HikariCP example
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
// jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
// connection pool configurations
config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
    connection.close(); // put back to connection pool
}
```
### Druid example
```java
public static void main(String[] args) throws Exception {
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// pool configurations
dataSource.setInitialSize(10);
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
    connection.close(); // put back to connection pool
}
```
**Note**
As of TDengine V1.6.4.1, the function select server_status() is supported specifically for heartbeat detection, so it is recommended to use select server_status() for validation queries when using connection pools.
Select server_status() returns 1 on success, as shown below.
```sql
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Integrated with framework
- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate.
- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring Boot.
## Example Codes
You can see sample code here: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
## FAQ
- java.lang.UnsatisfiedLinkError: no taos in java.library.path
  **Cause**: The application program cannot find the library *taos*.
  **Answer**: Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows, or make a soft link through `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux.
- java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
  **Cause**: Currently TDengine only supports 64-bit JDK.
  **Answer**: Re-install a 64-bit JDK.
- For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues)

View File

@ -284,3 +284,5 @@ keepColumnName 1
# 0 no query allowed, queries are disabled # 0 no query allowed, queries are disabled
# queryBufferSize -1 # queryBufferSize -1
# percent of redundant data in tsdb meta will compact meta data,0 means donot compact
# tsdbMetaCompactRatio 0

View File

@ -142,6 +142,7 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/perfMonitor || :
${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/set_core || : ${csudo} rm -f ${bin_link_dir}/set_core || :
fi fi
@ -167,6 +168,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
fi fi

View File

@ -1,6 +1,6 @@
name: tdengine name: tdengine
base: core18 base: core18
version: '2.1.6.0' version: '2.1.7.1'
icon: snap/gui/t-dengine.svg icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT. summary: an open-source big data platform designed and optimized for IoT.
description: | description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd - usr/bin/taosd
- usr/bin/taos - usr/bin/taos
- usr/bin/taosdemo - usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.6.0 - usr/lib/libtaos.so.2.1.7.1
- usr/lib/libtaos.so.1 - usr/lib/libtaos.so.1
- usr/lib/libtaos.so - usr/lib/libtaos.so

View File

@ -116,8 +116,17 @@ void bnCleanupDnodes() {
static void bnCheckDnodesSize(int32_t dnodesNum) { static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) {
tsBnDnodes.maxSize = dnodesNum * 2; int32_t maxSize = dnodesNum * 2;
tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); SDnodeObj** list1 = NULL;
int32_t retry = 0;
while(list1 == NULL && retry++ < 3) {
list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
}
if(list1) {
tsBnDnodes.list = list1;
tsBnDnodes.maxSize = maxSize;
}
} }
} }

View File

@ -4,6 +4,8 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(jni)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
AUX_SOURCE_DIRECTORY(src SRC) AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX) IF (TD_LINUX)

View File

@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy);
int tsInsertInitialCheck(SSqlObj *pSql); int tsInsertInitialCheck(SSqlObj *pSql);
void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
void tscFreeRetrieveSup(SSqlObj *pSql);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -144,6 +144,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo); bool hasTagValOutput(SQueryInfo* pQueryInfo);
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
@ -214,6 +215,7 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function
int16_t size); int16_t size);
size_t tscNumOfExprs(SQueryInfo* pQueryInfo); size_t tscNumOfExprs(SQueryInfo* pQueryInfo);
int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo);
SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index); SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy); int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy); int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy);

View File

@ -38,6 +38,11 @@ extern "C" {
#include "qUtil.h" #include "qUtil.h"
#include "tcmdtype.h" #include "tcmdtype.h"
typedef enum {
TAOS_REQ_FROM_SHELL,
TAOS_REQ_FROM_HTTP
} SReqOrigin;
// forward declaration // forward declaration
struct SSqlInfo; struct SSqlInfo;
@ -123,17 +128,15 @@ typedef struct {
int32_t kvLen; // len of SKVRow int32_t kvLen; // len of SKVRow
} SMemRowInfo; } SMemRowInfo;
typedef struct { typedef struct {
uint8_t memRowType; uint8_t memRowType; // default is 0, that is SDataRow
uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need uint8_t compareStat; // 0 no need, 1 need compare
TDRowTLenT dataRowInitLen;
TDRowTLenT kvRowInitLen; TDRowTLenT kvRowInitLen;
SMemRowInfo *rowInfo; SMemRowInfo *rowInfo;
} SMemRowBuilder; } SMemRowBuilder;
typedef enum { typedef enum {
ROW_COMPARE_UNKNOWN = 0, ROW_COMPARE_NO_NEED = 0,
ROW_COMPARE_NEED = 1, ROW_COMPARE_NEED = 1,
ROW_COMPARE_NO_NEED = 2,
} ERowCompareStat; } ERowCompareStat;
int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec);
@ -342,6 +345,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet; SRpcCorEpSet *tscCorMgmtEpSet;
pthread_mutex_t mutex; pthread_mutex_t mutex;
int32_t numOfObj; // number of sqlObj from this tscObj int32_t numOfObj; // number of sqlObj from this tscObj
SReqOrigin from;
} STscObj; } STscObj;
typedef struct SSubqueryState { typedef struct SSubqueryState {

View File

@ -649,7 +649,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
for(int32_t j = 0; j < numOfExpr; ++j) { for(int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows); pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows);
if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) { if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) {
pCtx[j].ptsOutputBuf = pCtx[0].pOutput; if(j > 0)pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput;
} }
} }

View File

@ -51,20 +51,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
} }
} }
// default compareStat is ROW_COMPARE_NO_NEED
if (nBoundCols == 0) { // file input if (nBoundCols == 0) { // file input
pBuilder->memRowType = SMEM_ROW_DATA; pBuilder->memRowType = SMEM_ROW_DATA;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else { } else {
float boundRatio = ((float)nBoundCols / (float)nCols); float boundRatio = ((float)nBoundCols / (float)nCols);
if (boundRatio < KVRatioKV) { if (boundRatio < KVRatioKV) {
pBuilder->memRowType = SMEM_ROW_KV; pBuilder->memRowType = SMEM_ROW_KV;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} else if (boundRatio > KVRatioData) { } else if (boundRatio > KVRatioData) {
pBuilder->memRowType = SMEM_ROW_DATA; pBuilder->memRowType = SMEM_ROW_DATA;
pBuilder->compareStat = ROW_COMPARE_NO_NEED;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
pBuilder->compareStat = ROW_COMPARE_NEED; pBuilder->compareStat = ROW_COMPARE_NEED;
@ -76,7 +74,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
} }
} }
pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx);
if (nRows > 0) { if (nRows > 0) {
@ -86,7 +83,7 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3
} }
for (int i = 0; i < nRows; ++i) { for (int i = 0; i < nRows; ++i) {
(pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen;
(pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen;
} }
} }
@ -460,7 +457,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
STableMeta * pTableMeta = pDataBlocks->pTableMeta; STableMeta * pTableMeta = pDataBlocks->pTableMeta;
SSchema * schema = tscGetTableSchema(pTableMeta); SSchema * schema = tscGetTableSchema(pTableMeta);
SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder;
int32_t dataLen = pBuilder->dataRowInitLen; int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE;
int32_t kvLen = pBuilder->kvRowInitLen; int32_t kvLen = pBuilder->kvRowInitLen;
bool isParseBindParam = false; bool isParseBindParam = false;
@ -809,13 +806,12 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
// allocate memory // allocate memory
size_t nAlloc = nRows * sizeof(SBlockKeyTuple); size_t nAlloc = nRows * sizeof(SBlockKeyTuple);
if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) {
size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc);
char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc);
if (tmp == NULL) { if (tmp == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY; return TSDB_CODE_TSC_OUT_OF_MEMORY;
} }
pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp;
pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc;
} }
memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc);
@ -1697,7 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMeta);
SInsertStatementParam* pInsertParam = &pCmd->insertParam; SInsertStatementParam *pInsertParam = &pCmd->insertParam;
destroyTableNameList(pInsertParam); destroyTableNameList(pInsertParam);
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
@ -1726,12 +1722,6 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
goto _error; goto _error;
} }
if (TSDB_CODE_SUCCESS !=
(ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams,
pTableDataBlock->boundColumnInfo.allNullLen))) {
goto _error;
}
while ((readLen = tgetline(&line, &n, fp)) != -1) { while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0; line[--readLen] = 0;

View File

@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self; pCmd->insertParam.objectId = pSql->self;
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) { if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql); tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY); STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);

View File

@ -40,6 +40,7 @@
#include "qScript.h" #include "qScript.h"
#include "ttype.h" #include "ttype.h"
#include "qFilter.h" #include "qFilter.h"
#include "httpInt.h"
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
@ -1686,8 +1687,28 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; } static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
static char* cloneCurrentDBName(SSqlObj* pSql) { static char* cloneCurrentDBName(SSqlObj* pSql) {
char *p = NULL;
HttpContext *pCtx = NULL;
pthread_mutex_lock(&pSql->pTscObj->mutex); pthread_mutex_lock(&pSql->pTscObj->mutex);
char *p = strdup(pSql->pTscObj->db); STscObj *pTscObj = pSql->pTscObj;
switch (pTscObj->from) {
case TAOS_REQ_FROM_HTTP:
pCtx = pSql->param;
if (pCtx && pCtx->db[0] != '\0') {
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
assert(len <= sizeof(db));
p = strdup(db);
}
break;
default:
break;
}
if (p == NULL) {
p = strdup(pSql->pTscObj->db);
}
pthread_mutex_unlock(&pSql->pTscObj->mutex); pthread_mutex_unlock(&pSql->pTscObj->mutex);
return p; return p;
@ -2048,6 +2069,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
} }
bool hasDistinct = false; bool hasDistinct = false;
bool hasAgg = false; bool hasAgg = false;
size_t numOfExpr = taosArrayGetSize(pSelNodeList); size_t numOfExpr = taosArrayGetSize(pSelNodeList);
@ -2105,7 +2127,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
} }
} }
//TODO(dengyihao), refactor as function //TODO(dengyihao), refactor as function
//handle distinct func mixed with other func //handle distinct func mixed with other func
if (hasDistinct == true) { if (hasDistinct == true) {
@ -2121,6 +2142,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
if (pQueryInfo->pDownstream != NULL) { if (pQueryInfo->pDownstream != NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
} }
pQueryInfo->distinct = true; pQueryInfo->distinct = true;
} }
@ -2605,13 +2627,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
// set the first column ts for diff query // set the first column ts for diff query
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
colIndex += 1;
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0}; SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false); TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
SColumnList ids = createColumnList(1, 0, 0); SColumnList ids = createColumnList(1, 0, 0);
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr); insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
} }
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false); SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
@ -2679,8 +2700,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
assert(ids.num == 1); assert(ids.num == 1);
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
} }
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -2884,7 +2905,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX; const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX); SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
aAggs[TSDB_FUNC_TS].name, pExpr); aAggs[TSDB_FUNC_TS].name, pExpr);
colIndex += 1; // the first column is ts colIndex += 1; // the first column is ts
@ -3062,7 +3083,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s); tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
} }
} }
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -4660,7 +4680,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
} }
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
*pExpr = NULL; *pExpr = NULL;
if (type) { if (type) {
*type |= TSQL_EXPR_JOIN; *type |= TSQL_EXPR_JOIN;
@ -5642,6 +5662,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg3 = "top/bottom not support fill"; const char* msg3 = "top/bottom not support fill";
const char* msg4 = "illegal value or data overflow"; const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query"; const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
@ -5680,6 +5701,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
} }
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_PREV; pQueryInfo->fillType = TSDB_FILL_PREV;
if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) { } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NEXT; pQueryInfo->fillType = TSDB_FILL_NEXT;
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
@ -5784,14 +5808,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column"; const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
const char* msg8 = "only column in groupby clause allowed as order column"; const char* msg8 = "only column in groupby clause allowed as order column";
const char* msg9 = "orderby column must projected in subquery"; const char* msg9 = "orderby column must projected in subquery";
const char* msg10 = "not support distinct mixed with order by";
setDefaultOrderInfo(pQueryInfo); setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pSqlNode->pSortOrder == NULL) {
if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
char* pMsgBuf = tscGetErrorMsgPayload(pCmd); char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
SArray* pSortOrder = pSqlNode->pSortOrder; SArray* pSortOrder = pSqlNode->pSortOrder;
@ -5811,6 +5834,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg2); return invalidOperationMsg(pMsgBuf, msg2);
} }
} }
if (size > 0 && pQueryInfo->distinct) {
return invalidOperationMsg(pMsgBuf, msg10);
}
// handle the first part of order by // handle the first part of order by
tVariant* pVar = taosArrayGet(pSortOrder, 0); tVariant* pVar = taosArrayGet(pSortOrder, 0);
@ -5879,10 +5905,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
} else if (isTopBottomQuery(pQueryInfo)) { } else if (isTopBottomQuery(pQueryInfo)) {
/* order of top/bottom query in interval is not valid */ /* order of top/bottom query in interval is not valid */
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS); assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, 1); pExpr = tscExprGet(pQueryInfo, pos);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5); return invalidOperationMsg(pMsgBuf, msg5);
} }
@ -5973,11 +6003,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg8); return invalidOperationMsg(pMsgBuf, msg8);
} }
} else { } else {
/* order of top/bottom query in interval is not valid */ int32_t pos = tscExprTopBottomIndex(pQueryInfo);
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); assert(pos > 0);
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
assert(pExpr->base.functionId == TSDB_FUNC_TS); assert(pExpr->base.functionId == TSDB_FUNC_TS);
pExpr = tscExprGet(pQueryInfo, 1); pExpr = tscExprGet(pQueryInfo, pos);
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidOperationMsg(pMsgBuf, msg5); return invalidOperationMsg(pMsgBuf, msg5);
} }
@ -8681,8 +8713,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
n += 1; n += 1;
} }
info->numOfColumns = n;
info->numOfColumns = n;
return meta; return meta;
} }

View File

@ -337,10 +337,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) { void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SRpcMsg* rpcMsg = pSchedMsg->ahandle;
SRpcEpSet* pEpSet = pSchedMsg->thandle;
TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle; TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle); SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
if (pSql == NULL) { if (pSql == NULL) {
@ -500,6 +497,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
if (rpcMsg->code != TSDB_CODE_SUCCESS) { if (rpcMsg->code != TSDB_CODE_SUCCESS) {
pRes->code = rpcMsg->code; pRes->code = rpcMsg->code;
} }
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code; rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) { if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) {
tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64); tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64);
@ -522,6 +520,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code)); sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code));
} }
} }
(*pSql->fp)(pSql->param, pSql, rpcMsg->code); (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
} }
@ -536,33 +535,6 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
free(pEpSet); free(pEpSet);
} }
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
int64_t st = taosGetTimestampUs();
SSchedMsg schedMsg = {0};
schedMsg.fp = doProcessMsgFromServer;
SRpcMsg* rpcMsgCopy = calloc(1, sizeof(SRpcMsg));
memcpy(rpcMsgCopy, rpcMsg, sizeof(struct SRpcMsg));
schedMsg.ahandle = (void*)rpcMsgCopy;
SRpcEpSet* pEpSetCopy = NULL;
if (pEpSet != NULL) {
pEpSetCopy = calloc(1, sizeof(SRpcEpSet));
memcpy(pEpSetCopy, pEpSet, sizeof(SRpcEpSet));
}
schedMsg.thandle = (void*)pEpSetCopy;
schedMsg.msg = NULL;
taosScheduleTask(tscQhandle, &schedMsg);
int64_t et = taosGetTimestampUs();
if (et - st > 100) {
tscDebug("add message to task queue, elapsed time:%"PRId64, et - st);
}
}
int doBuildAndSendMsg(SSqlObj *pSql) { int doBuildAndSendMsg(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd; SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res; SSqlRes *pRes = &pSql->res;
@ -2678,7 +2650,7 @@ int tscProcessQueryRsp(SSqlObj *pSql) {
return 0; return 0;
} }
static void decompressQueryColData(SSqlRes *pRes, SQueryInfo* pQueryInfo, char **data, int8_t compressed, int compLen) { static void decompressQueryColData(SSqlObj *pSql, SSqlRes *pRes, SQueryInfo* pQueryInfo, char **data, int8_t compressed, int32_t compLen) {
int32_t decompLen = 0; int32_t decompLen = 0;
int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput; int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
int32_t *compSizes; int32_t *compSizes;
@ -2715,6 +2687,9 @@ static void decompressQueryColData(SSqlRes *pRes, SQueryInfo* pQueryInfo, char *
pData = *data + compLen + numOfCols * sizeof(int32_t); pData = *data + compLen + numOfCols * sizeof(int32_t);
} }
tscDebug("0x%"PRIx64" decompress col data, compressed size:%d, decompressed size:%d",
pSql->self, (int32_t)(compLen + numOfCols * sizeof(int32_t)), decompLen);
int32_t tailLen = pRes->rspLen - sizeof(SRetrieveTableRsp) - decompLen; int32_t tailLen = pRes->rspLen - sizeof(SRetrieveTableRsp) - decompLen;
memmove(*data + decompLen, pData, tailLen); memmove(*data + decompLen, pData, tailLen);
memmove(*data, outputBuf, decompLen); memmove(*data, outputBuf, decompLen);
@ -2749,7 +2724,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
//Decompress col data if compressed from server //Decompress col data if compressed from server
if (pRetrieve->compressed) { if (pRetrieve->compressed) {
int32_t compLen = htonl(pRetrieve->compLen); int32_t compLen = htonl(pRetrieve->compLen);
decompressQueryColData(pRes, pQueryInfo, &pRes->data, pRetrieve->compressed, compLen); decompressQueryColData(pSql, pRes, pQueryInfo, &pRes->data, pRetrieve->compressed, compLen);
} }
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

View File

@ -892,7 +892,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
} }
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) { if (pSql->sqlstr == NULL) {
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
tfree(pSql); tfree(pSql);

View File

@ -2038,17 +2038,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
tscAsyncResultOnError(pSql); tscAsyncResultOnError(pSql);
} }
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0); assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
for(int32_t i = 0; i < numOfSubs; ++i) { for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i]; SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL); assert(pSub != NULL);
SRetrieveSupport* pSupport = pSub->param; tscFreeRetrieveSup(pSub);
tfree(pSupport->localBuffer);
tfree(pSupport);
taos_free_result(pSub); taos_free_result(pSub);
} }
@ -2406,6 +2403,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
} else { } else {
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
assert(ti >= 0);
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
tscColumnCopy(x, pCol);
} }
} }
} }
@ -2607,7 +2608,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static void tscFreeRetrieveSup(SSqlObj *pSql) { void tscFreeRetrieveSup(SSqlObj *pSql) {
SRetrieveSupport *trsupport = pSql->param; SRetrieveSupport *trsupport = pSql->param;
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
@ -2765,27 +2766,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
int32_t code = pParentSql->res.code; int32_t code = pParentSql->res.code;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { SSqlObj *userSql = NULL;
// remove the cached tableMeta and vgroup id list, and then parse the sql again if (pParentSql->param) {
tscResetSqlCmd( &pParentSql->cmd, true, pParentSql->self); userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql;
}
pParentSql->retry++; if (userSql == NULL) {
pParentSql->res.code = TSDB_CODE_SUCCESS; userSql = pParentSql;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, }
tstrerror(code), pParentSql->retry);
code = tsParseSql(pParentSql, true); if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
if (userSql != pParentSql) {
tscFreeRetrieveSup(pParentSql);
}
tscFreeSubobj(userSql);
tfree(userSql->pSubs);
userSql->res.code = TSDB_CODE_SUCCESS;
userSql->retry++;
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
tstrerror(code), userSql->retry);
tscResetSqlCmd(&userSql->cmd, true, userSql->self);
code = tsParseSql(userSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;
} }
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code; userSql->res.code = code;
tscAsyncResultOnError(pParentSql); tscAsyncResultOnError(userSql);
return; return;
} }
executeQuery(pParentSql, pQueryInfo); pQueryInfo = tscGetQueryInfo(&userSql->cmd);
executeQuery(userSql, pQueryInfo);
} else { } else {
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
} }
@ -2855,7 +2872,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
tscClearInterpInfo(pPQueryInfo);
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
pParentSql->res.code = code; pParentSql->res.code = code;

View File

@ -403,6 +403,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false; return false;
} }
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TS) {
continue;
}
if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
return i;
}
}
return -1;
}
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo); size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@ -659,8 +680,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol // convert unicode to native code in a temporary buffer extra one byte for terminated symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
if(buffer == NULL)
return ;
pRes->buffer[i] = buffer;
// string terminated char for binary data // string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
@ -1236,6 +1259,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
} }
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
pOutput->precision = pSqlObjList[0]->res.precision; pOutput->precision = pSqlObjList[0]->res.precision;
SSchema* schema = NULL; SSchema* schema = NULL;
@ -2419,6 +2443,19 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList); return taosArrayGetSize(pQueryInfo->exprList);
} }
int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL)
continue;
if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
return i;
}
}
return -1;
}
// todo REFACTOR // todo REFACTOR
void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) { void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0); assert (pExpr != NULL || argument != NULL || bytes != 0);
@ -3623,6 +3660,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->numOfTables = 0; pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen; pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
pNewQueryInfo->distinct = pQueryInfo->distinct; pNewQueryInfo->distinct = pQueryInfo->distinct;
pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult; pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult;
@ -3842,8 +3881,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
int32_t index = ps->subqueryIndex; int32_t index = ps->subqueryIndex;
bool ret = subAndCheckDone(pSql, pParentSql, index); bool ret = subAndCheckDone(pSql, pParentSql, index);
tfree(ps); tscFreeRetrieveSup(pSql);
pSql->param = NULL;
if (!ret) { if (!ret) {
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
@ -3852,7 +3890,13 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
// todo refactor // todo refactor
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self); if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
tscAsyncResultOnError(pParentSql);
return;
}
tscFreeSubobj(pParentSql);
tfree(pParentSql->pSubs);
pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->res.code = TSDB_CODE_SUCCESS;
pParentSql->retry++; pParentSql->retry++;
@ -3860,6 +3904,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry); tstrerror(code), pParentSql->retry);
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
code = tsParseSql(pParentSql, true); code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return; return;
@ -3894,9 +3941,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
} }
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
assert(pSql->subState.numOfSub == 0);
pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
assert(pSql->pSubs == NULL);
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
assert(pSql->subState.states == NULL);
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
code = pthread_mutex_init(&pSql->subState.mutex, NULL); code = pthread_mutex_init(&pSql->subState.mutex, NULL);
@ -3922,6 +3971,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->sqlstr = strdup(pSql->sqlstr); pNew->sqlstr = strdup(pSql->sqlstr);
pNew->fp = tscSubqueryCompleteCallback; pNew->fp = tscSubqueryCompleteCallback;
pNew->maxRetry = pSql->maxRetry; pNew->maxRetry = pSql->maxRetry;
pNew->cmd.resColumnId = TSDB_RES_COL_ID;
tsem_init(&pNew->rspSem, 0, 0); tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
@ -4454,10 +4506,14 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
assert(*ppChild != NULL); assert(*ppChild != NULL);
STableMeta* p = *ppSTable; STableMeta* p = *ppSTable;
STableMeta* pChild = *ppChild; STableMeta* pChild = *ppChild;
size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
if (p != NULL && sz != 0) { if (p != NULL && sz != 0) {
memset((char *)p, 0, sz); memset((char *)p, 0, sz);
} }
STableMeta* pChild1;
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
*ppSTable = p; *ppSTable = p;
@ -4468,7 +4524,10 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema); int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes; int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) { if (*tableMetaCapacity < tableMetaSize) {
pChild = realloc(pChild, tableMetaSize); pChild1 = realloc(pChild, tableMetaSize);
if(pChild1 == NULL)
return -1;
pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize; *tableMetaCapacity = (size_t)tableMetaSize;
} }

View File

@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
if (pBuilder->nCols >= pBuilder->tCols) { if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2; pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
if (pBuilder->pColIdx == NULL) return -1; if (pColIdx == NULL) return -1;
pBuilder->pColIdx = pColIdx;
} }
pBuilder->pColIdx[pBuilder->nCols].colId = colId; pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
while (tlen > pBuilder->alloc - pBuilder->size) { while (tlen > pBuilder->alloc - pBuilder->size) {
pBuilder->alloc *= 2; pBuilder->alloc *= 2;
} }
pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc); void* buf = realloc(pBuilder->buf, pBuilder->alloc);
if (pBuilder->buf == NULL) return -1; if (buf == NULL) return -1;
pBuilder->buf = buf;
} }
memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);

View File

@ -107,6 +107,9 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate; extern int8_t tsUpdate;
extern int8_t tsCacheLastRow; extern int8_t tsCacheLastRow;
//tsdb
extern bool tsdbForceKeepFile;
// balance // balance
extern int8_t tsEnableBalance; extern int8_t tsEnableBalance;
extern int8_t tsAlternativeRole; extern int8_t tsAlternativeRole;
@ -160,6 +163,7 @@ extern char tsDataDir[];
extern char tsLogDir[]; extern char tsLogDir[];
extern char tsScriptDir[]; extern char tsScriptDir[];
extern int64_t tsTickPerDay[3]; extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen;
// system info // system info
extern char tsOsName[]; extern char tsOsName[];

File diff suppressed because it is too large Load Diff

View File

@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1
if (pBuilder->nCols >= pBuilder->tCols) { if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2; pBuilder->tCols *= 2;
pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
if (pBuilder->columns == NULL) return -1; if (columns == NULL) return -1;
pBuilder->columns = columns;
} }
STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
@ -517,6 +518,7 @@ void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, b
} }
} }
//TODO: refactor this function to eliminate additional memory copy
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) { int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset, bool forceSetNull) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfCols == source->numOfCols); ASSERT(target->numOfCols == source->numOfCols);

View File

@ -76,16 +76,15 @@ int32_t tsMaxBinaryDisplayWidth = 30;
int32_t tsCompressMsgSize = -1; int32_t tsCompressMsgSize = -1;
/* denote if server needs to compress the retrieved column data before adding to the rpc response message body. /* denote if server needs to compress the retrieved column data before adding to the rpc response message body.
* 0: disable column data compression * 0: all data are compressed
* 1: enable column data compression * -1: all data are not compressed
* This option is default to disabled. Once enabled, compression will be conducted if any column has size more * other values: if any retrieved column size is greater than the tsCompressColData, all data will be compressed.
* than QUERY_COMP_THRESHOLD. Otherwise, no further compression is needed.
*/ */
int32_t tsCompressColData = 0; int32_t tsCompressColData = -1;
// client // client
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int8_t tsTscEnableRecordSql = 0; int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from // the maximum number of results for projection query on super table that are returned from
@ -150,6 +149,11 @@ int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
// balance // balance
int8_t tsEnableBalance = 1; int8_t tsEnableBalance = 1;
@ -205,6 +209,7 @@ char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/"; char tsTempDir[PATH_MAX] = "/tmp/";
int32_t tsDiskCfgNum = 0; int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE #ifndef _STORAGE
SDiskCfg tsDiskCfg[1]; SDiskCfg tsDiskCfg[1];
@ -565,7 +570,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "numOfMnodes"; cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes; cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -1001,10 +1005,10 @@ static void doInitGlobalConfig(void) {
cfg.option = "compressColData"; cfg.option = "compressColData";
cfg.ptr = &tsCompressColData; cfg.ptr = &tsCompressColData;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0; cfg.minValue = -1;
cfg.maxValue = 1; cfg.maxValue = 100000000.0f;
cfg.ptrLength = 0; cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
@ -1233,6 +1237,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "topicBianryLen";
cfg.ptr = &tsTopicBianryLen;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 16;
cfg.maxValue = 16000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql"; cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql; cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.valType = TAOS_CFG_VTYPE_INT8;
@ -1576,6 +1590,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE; cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg); taosInitConfigOption(cfg);
cfg.option = "tsdbMetaCompactRatio";
cfg.ptr = &tsTsdbMetaCompactRatio;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 100;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM);
#ifdef TD_TSZ #ifdef TD_TSZ
// lossy compress // lossy compress

View File

@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = {
#define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \
do { \ do { \
if (_list[(_index)] >= (INT64_MAX - (__sum))) { \
__sum = INT64_MAX; \
} else { \
(__sum) += (_list)[(_index)]; \ (__sum) += (_list)[(_index)]; \
} \
if ((__min) > (_list)[(_index)]) { \ if ((__min) > (_list)[(_index)]) { \
(__min) = (_list)[(_index)]; \ (__min) = (_list)[(_index)]; \
(__minIndex) = (_index); \ (__minIndex) = (_index); \

View File

@ -38,12 +38,12 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
switch (token->type) { switch (token->type) {
case TSDB_DATA_TYPE_BOOL: { case TSDB_DATA_TYPE_BOOL: {
int32_t k = strncasecmp(token->z, "true", 4); if (strncasecmp(token->z, "true", 4) == 0) {
if (k == 0) {
pVar->i64 = TSDB_TRUE; pVar->i64 = TSDB_TRUE;
} else { } else if (strncasecmp(token->z, "false", 5) == 0) {
assert(strncasecmp(token->z, "false", 5) == 0);
pVar->i64 = TSDB_FALSE; pVar->i64 = TSDB_FALSE;
} else {
return;
} }
break; break;

View File

@ -88,6 +88,8 @@ extern const int32_t TYPE_BYTES[15];
#define TSDB_DEFAULT_PASS "taosdata" #define TSDB_DEFAULT_PASS "taosdata"
#endif #endif
#define SHELL_MAX_PASSWORD_LEN 20
#define TSDB_TRUE 1 #define TSDB_TRUE 1
#define TSDB_FALSE 0 #define TSDB_FALSE 0
#define TSDB_OK 0 #define TSDB_OK 0
@ -275,6 +277,7 @@ do { \
#define TSDB_MAX_TABLES 10000000 #define TSDB_MAX_TABLES 10000000
#define TSDB_DEFAULT_TABLES 1000000 #define TSDB_DEFAULT_TABLES 1000000
#define TSDB_TABLES_STEP 1000 #define TSDB_TABLES_STEP 1000
#define TSDB_META_COMPACT_RATIO 0 // disable tsdb meta compact by default
#define TSDB_MIN_DAYS_PER_FILE 1 #define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650 #define TSDB_MAX_DAYS_PER_FILE 3650

View File

@ -25,7 +25,6 @@
#define MAX_USERNAME_SIZE 64 #define MAX_USERNAME_SIZE 64
#define MAX_DBNAME_SIZE 64 #define MAX_DBNAME_SIZE 64
#define MAX_IP_SIZE 20 #define MAX_IP_SIZE 20
#define MAX_PASSWORD_SIZE 20
#define MAX_HISTORY_SIZE 1000 #define MAX_HISTORY_SIZE 1000
#define MAX_COMMAND_SIZE 1048586 #define MAX_COMMAND_SIZE 1048586
#define HISTORY_FILE ".taos_history" #define HISTORY_FILE ".taos_history"

View File

@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
int32_t tbIndex = tbNum++; int32_t tbIndex = tbNum++;
if (tbMallocNum < tbNum) { if (tbMallocNum < tbNum) {
tbMallocNum = (tbMallocNum * 2 + 1); tbMallocNum = (tbMallocNum * 2 + 1);
tbNames = realloc(tbNames, tbMallocNum * sizeof(char *)); char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
if (tbNames == NULL) { if (tbNames1 == NULL) {
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum); fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
code = TSDB_CODE_TSC_OUT_OF_MEMORY; code = TSDB_CODE_TSC_OUT_OF_MEMORY;
break; break;
} }
tbNames = tbNames1;
} }
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN); tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);

View File

@ -66,7 +66,7 @@ void printHelp() {
char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
char g_password[MAX_PASSWORD_SIZE]; char g_password[SHELL_MAX_PASSWORD_LEN];
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
wordexp_t full_path; wordexp_t full_path;
@ -81,19 +81,25 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
} }
} }
// for password // for password
else if (strncmp(argv[i], "-p", 2) == 0) { else if ((strncmp(argv[i], "-p", 2) == 0)
|| (strncmp(argv[i], "--password", 10) == 0)) {
strcpy(tsOsName, "Darwin"); strcpy(tsOsName, "Darwin");
printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) { if ((strlen(argv[i]) == 2)
|| (strncmp(argv[i], "--password", 10) == 0)) {
printf("Enter password: "); printf("Enter password: ");
taosSetConsoleEcho(false);
if (scanf("%s", g_password) > 1) { if (scanf("%s", g_password) > 1) {
fprintf(stderr, "password read error\n"); fprintf(stderr, "password read error\n");
} }
taosSetConsoleEcho(true);
getchar(); getchar();
} else { } else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
} }
arguments->password = g_password; arguments->password = g_password;
strcpy(argv[i], "");
argc -= 1;
} }
// for management port // for management port
else if (strcmp(argv[i], "-P") == 0) { else if (strcmp(argv[i], "-P") == 0) {

View File

@ -254,9 +254,13 @@ int32_t shellRunCommand(TAOS* con, char* command) {
} }
if (c == '\\') { if (c == '\\') {
if (quote != 0 && (*command == '_' || *command == '\\')) {
//DO nothing
} else {
esc = true; esc = true;
continue; continue;
} }
}
if (quote == c) { if (quote == c) {
quote = 0; quote = 0;

View File

@ -47,7 +47,7 @@ static struct argp_option options[] = {
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
{"check", 'k', "CHECK", 0, "Check tables."}, {"check", 'k', "CHECK", 0, "Check tables."},
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, {"timezone", 'z', "TIMEZONE", 0, "Time zone of the shell, default is local."},
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speen|fqdn."}, {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup|sync|speen|fqdn."},
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
{"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."}, {"pktnum", 'N', "PKTNUM", 0, "Packet numbers used for net test, default is 100."},
@ -76,7 +76,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
} }
break; break;
case 't': case 'z':
arguments->timezone = arg; arguments->timezone = arg;
break; break;
case 'u': case 'u':
@ -173,22 +173,29 @@ static struct argp argp = {options, parse_opt, args_doc, doc};
char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
"Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n";
char g_password[MAX_PASSWORD_SIZE]; char g_password[SHELL_MAX_PASSWORD_LEN];
static void parse_password( static void parse_args(
int argc, char *argv[], SShellArguments *arguments) { int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
if (strncmp(argv[i], "-p", 2) == 0) { if ((strncmp(argv[i], "-p", 2) == 0)
|| (strncmp(argv[i], "--password", 10) == 0)) {
strcpy(tsOsName, "Linux"); strcpy(tsOsName, "Linux");
printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) { if ((strlen(argv[i]) == 2)
|| (strncmp(argv[i], "--password", 10) == 0)) {
printf("Enter password: "); printf("Enter password: ");
taosSetConsoleEcho(false);
if (scanf("%20s", g_password) > 1) { if (scanf("%20s", g_password) > 1) {
fprintf(stderr, "password reading error\n"); fprintf(stderr, "password reading error\n");
} }
getchar(); taosSetConsoleEcho(true);
if (EOF == getchar()) {
fprintf(stderr, "getchar() return EOF\n");
}
} else { } else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
strcpy(argv[i], "-p");
} }
arguments->password = g_password; arguments->password = g_password;
arguments->is_use_passwd = true; arguments->is_use_passwd = true;
@ -203,7 +210,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
argp_program_version = verType; argp_program_version = verType;
if (argc > 1) { if (argc > 1) {
parse_password(argc, argv, arguments); parse_args(argc, argv, arguments);
} }
argp_parse(&argp, argc, argv, 0, 0, arguments); argp_parse(&argp, argc, argv, 0, 0, arguments);

View File

@ -68,7 +68,7 @@ void printHelp() {
exit(EXIT_SUCCESS); exit(EXIT_SUCCESS);
} }
char g_password[MAX_PASSWORD_SIZE]; char g_password[SHELL_MAX_PASSWORD_LEN];
void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
@ -82,20 +82,26 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
} }
} }
// for password // for password
else if (strncmp(argv[i], "-p", 2) == 0) { else if ((strncmp(argv[i], "-p", 2) == 0)
|| (strncmp(argv[i], "--password", 10) == 0)) {
arguments->is_use_passwd = true; arguments->is_use_passwd = true;
strcpy(tsOsName, "Windows"); strcpy(tsOsName, "Windows");
printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info());
if (strlen(argv[i]) == 2) { if ((strlen(argv[i]) == 2)
|| (strncmp(argv[i], "--password", 10) == 0)) {
printf("Enter password: "); printf("Enter password: ");
taosSetConsoleEcho(false);
if (scanf("%s", g_password) > 1) { if (scanf("%s", g_password) > 1) {
fprintf(stderr, "password read error!\n"); fprintf(stderr, "password read error!\n");
} }
taosSetConsoleEcho(true);
getchar(); getchar();
} else { } else {
tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); tstrncpy(g_password, (char *)(argv[i] + 2), SHELL_MAX_PASSWORD_LEN);
} }
arguments->password = g_password; arguments->password = g_password;
strcpy(argv[i], "");
argc -= 1;
} }
// for management port // for management port
else if (strcmp(argv[i], "-P") == 0) { else if (strcmp(argv[i], "-P") == 0) {

File diff suppressed because it is too large Load Diff

View File

@ -62,6 +62,20 @@ typedef struct {
#define errorPrint(fmt, ...) \ #define errorPrint(fmt, ...) \
do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0) do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0)
static bool isStringNumber(char *input)
{
int len = strlen(input);
if (0 == len) {
return false;
}
for (int i = 0; i < len; i++) {
if (!isdigit(input[i]))
return false;
}
return true;
}
// -------------------------- SHOW DATABASE INTERFACE----------------------- // -------------------------- SHOW DATABASE INTERFACE-----------------------
enum _show_db_index { enum _show_db_index {
@ -243,19 +257,15 @@ static struct argp_option options[] = {
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
{"debug", 'g', 0, 0, "Print debug info.", 8}, {"debug", 'g', 0, 0, "Print debug info.", 8},
{"verbose", 'b', 0, 0, "Print verbose debug info.", 9},
{"performanceprint", 'm', 0, 0, "Print performance debug info.", 10},
{0} {0}
}; };
#define MAX_PASSWORD_SIZE 20
/* Used by main to communicate with parse_opt. */ /* Used by main to communicate with parse_opt. */
typedef struct arguments { typedef struct arguments {
// connection option // connection option
char *host; char *host;
char *user; char *user;
char password[MAX_PASSWORD_SIZE]; char password[SHELL_MAX_PASSWORD_LEN];
uint16_t port; uint16_t port;
char cversion[12]; char cversion[12];
uint16_t mysqlFlag; uint16_t mysqlFlag;
@ -432,7 +442,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break; break;
// dump unit option // dump unit option
case 'A': case 'A':
g_args.all_databases = true;
break; break;
case 'D': case 'D':
g_args.databases = true; g_args.databases = true;
@ -477,6 +486,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.table_batch = atoi(arg); g_args.table_batch = atoi(arg);
break; break;
case 'T': case 'T':
if (!isStringNumber(arg)) {
errorPrint("%s", "\n\t-T need a number following!\n");
exit(EXIT_FAILURE);
}
g_args.thread_num = atoi(arg); g_args.thread_num = atoi(arg);
break; break;
case OPT_ABORT: case OPT_ABORT:
@ -555,20 +568,37 @@ static void parse_precision_first(
} }
} }
static void parse_password( static void parse_args(
int argc, char *argv[], SArguments *arguments) { int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
if (strncmp(argv[i], "-p", 2) == 0) { if ((strncmp(argv[i], "-p", 2) == 0)
if (strlen(argv[i]) == 2) { || (strncmp(argv[i], "--password", 10) == 0)) {
if ((strlen(argv[i]) == 2)
|| (strncmp(argv[i], "--password", 10) == 0)) {
printf("Enter password: "); printf("Enter password: ");
taosSetConsoleEcho(false);
if(scanf("%20s", arguments->password) > 1) { if(scanf("%20s", arguments->password) > 1) {
errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__);
} }
taosSetConsoleEcho(true);
} else { } else {
tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); tstrncpy(arguments->password, (char *)(argv[i] + 2),
SHELL_MAX_PASSWORD_LEN);
strcpy(argv[i], "-p");
} }
argv[i] = ""; } else if (strcmp(argv[i], "-gg") == 0) {
arguments->verbose_print = true;
strcpy(argv[i], "");
} else if (strcmp(argv[i], "-PP") == 0) {
arguments->performance_print = true;
strcpy(argv[i], "");
} else if (strcmp(argv[i], "-A") == 0) {
g_args.all_databases = true;
} else {
continue;
} }
} }
} }
@ -637,7 +667,7 @@ int main(int argc, char *argv[]) {
if (argc > 1) { if (argc > 1) {
parse_precision_first(argc, argv, &g_args); parse_precision_first(argc, argv, &g_args);
parse_timestamp(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args);
parse_password(argc, argv, &g_args); parse_args(argc, argv, &g_args);
} }
argp_parse(&argp, argc, argv, 0, 0, &g_args); argp_parse(&argp, argc, argv, 0, 0, &g_args);

View File

@ -18,6 +18,7 @@
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#if defined(WINDOWS) #if defined(WINDOWS)
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3 for windows.\n"); printf("welcome to use taospack tools v1.3 for windows.\n");
@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]); //printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
if ( ++fi == malloc_cnt ) { if ( ++fi == malloc_cnt ) {
malloc_cnt += 100000; malloc_cnt += 100000;
floats = realloc(floats, malloc_cnt*sizeof(float)); float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
if(floats1 == NULL)
break;
floats = floats1;
} }
memset(buf, 0, sizeof(buf)); memset(buf, 0, sizeof(buf));
} }
@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){
} }
void unitTestFloat() { void unitTestFloat() {
float ft1 [] = {1.11, 2.22, 3.333}; float ft1 [] = {1.11, 2.22, 3.333};
@ -662,7 +665,50 @@ void unitTestFloat() {
free(ft2); free(ft2);
free(buff); free(buff);
free(output); free(output);
}
void leakFloat() {
int cnt = sizeof(g_ft1)/sizeof(float);
float* floats = g_ft1;
int algorithm = 2;
// compress
const char* input = (const char*)floats;
int input_len = cnt * sizeof(float);
int output_len = input_len + 1024;
char* output = (char*) malloc(output_len);
char* buff = (char*) malloc(input_len);
int buff_len = input_len;
int ret_len = 0;
ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
if(ret_len == 0) {
printf(" compress float error.\n");
free(buff);
free(output);
return ;
}
float* ft2 = (float*)malloc(input_len);
ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
if(ret_len == 0) {
printf(" decompress float error.\n");
}
free(ft2);
free(buff);
free(output);
}
void leakTest(){
for(int i=0; i< 90000000000000; i++){
if(i%10000==0)
printf(" ---------- %d ---------------- \n", i);
leakFloat();
}
} }
#define DB_CNT 500 #define DB_CNT 500
@ -689,7 +735,7 @@ extern char Compressor [];
// ----------------- main ---------------------- // ----------------- main ----------------------
// //
int main(int argc, char *argv[]) { int main(int argc, char *argv[]) {
printf("welcome to use taospack tools v1.3\n"); printf("welcome to use taospack tools v1.6\n");
//printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(int)=%d\n", (int)sizeof(int));
//printf(" sizeof(long)=%d\n", (int)sizeof(long)); //printf(" sizeof(long)=%d\n", (int)sizeof(long));
@ -753,6 +799,9 @@ int main(int argc, char *argv[]) {
if(strcmp(argv[1], "-mem") == 0) { if(strcmp(argv[1], "-mem") == 0) {
memTest(); memTest();
} }
else if(strcmp(argv[1], "-leak") == 0) {
leakTest();
}
} }
else{ else{
unitTestFloat(); unitTestFloat();

View File

@ -274,6 +274,7 @@ typedef struct {
int32_t rowSize; int32_t rowSize;
int32_t numOfRows; int32_t numOfRows;
void * pIter; void * pIter;
void * pVgIter;
void ** ppShow; void ** ppShow;
int16_t offset[TSDB_MAX_COLUMNS]; int16_t offset[TSDB_MAX_COLUMNS];
int32_t bytes[TSDB_MAX_COLUMNS]; int32_t bytes[TSDB_MAX_COLUMNS];

View File

@ -196,14 +196,20 @@ int32_t mnodeInitDnodes() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DNODE, mnodeProcessCreateDnodeMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DNODE, mnodeProcessCreateDnodeMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DNODE, mnodeProcessDropDnodeMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DNODE, mnodeProcessDropDnodeMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CONFIG_DNODE, mnodeProcessCfgDnodeMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CONFIG_DNODE, mnodeProcessCfgDnodeMsg);
mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP, mnodeProcessCfgDnodeMsgRsp); mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CONFIG_DNODE_RSP, mnodeProcessCfgDnodeMsgRsp);
mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_STATUS, mnodeProcessDnodeStatusMsg); mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_STATUS, mnodeProcessDnodeStatusMsg);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_MODULE, mnodeGetModuleMeta); mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_MODULE, mnodeGetModuleMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_MODULE, mnodeRetrieveModules); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_MODULE, mnodeRetrieveModules);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeGetConfigMeta); mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeGetConfigMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeRetrieveConfigs); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VARIABLES, mnodeRetrieveConfigs);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VNODES, mnodeGetVnodeMeta); mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_VNODES, mnodeGetVnodeMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VNODES, mnodeRetrieveVnodes); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_VNODES, mnodeRetrieveVnodes);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_VNODES, mnodeCancelGetNextVgroup);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DNODE, mnodeGetDnodeMeta); mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DNODE, mnodeGetDnodeMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DNODE, mnodeRetrieveDnodes); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DNODE, mnodeRetrieveDnodes);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DNODE, mnodeCancelGetNextDnode); mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DNODE, mnodeCancelGetNextDnode);
@ -1232,13 +1238,12 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo
pDnode = (SDnodeObj *)(pShow->pIter); pDnode = (SDnodeObj *)(pShow->pIter);
if (pDnode != NULL) { if (pDnode != NULL) {
void *pIter = NULL;
SVgObj *pVgroup; SVgObj *pVgroup;
while (1) { while (1) {
pIter = mnodeGetNextVgroup(pIter, &pVgroup); pShow->pVgIter = mnodeGetNextVgroup(pShow->pVgIter, &pVgroup);
if (pVgroup == NULL) break; if (pVgroup == NULL) break;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { for (int32_t i = 0; i < pVgroup->numOfVnodes && numOfRows < rows; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->pDnode == pDnode) { if (pVgid->pDnode == pDnode) {
cols = 0; cols = 0;
@ -1250,10 +1255,13 @@ static int32_t mnodeRetrieveVnodes(SShowObj *pShow, char *data, int32_t rows, vo
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, syncRole[pVgid->role]); STR_TO_VARSTR(pWrite, syncRole[pVgid->role]);
cols++; cols++;
numOfRows++; numOfRows++;
} }
} }
if (numOfRows >= rows) {
break;
}
mnodeDecVgroupRef(pVgroup); mnodeDecVgroupRef(pVgroup);
} }

View File

@ -422,8 +422,13 @@ static void* mnodePutShowObj(SShowObj *pShow) {
static void mnodeFreeShowObj(void *data) { static void mnodeFreeShowObj(void *data) {
SShowObj *pShow = *(SShowObj **)data; SShowObj *pShow = *(SShowObj **)data;
if (tsMnodeShowFreeIterFp[pShow->type] != NULL && pShow->pIter != NULL) { if (tsMnodeShowFreeIterFp[pShow->type] != NULL) {
(*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter); if (pShow->pVgIter != NULL) {
// only used in 'show vnodes "ep"'
(*tsMnodeShowFreeIterFp[pShow->type])(pShow->pVgIter);
} else {
if (pShow->pIter != NULL) (*tsMnodeShowFreeIterFp[pShow->type])(pShow->pIter);
}
} }
mDebug("%p, show is destroyed, data:%p index:%d", pShow, data, pShow->index); mDebug("%p, show is destroyed, data:%p index:%d", pShow, data, pShow->index);

View File

@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2; (*totalMallocLen) *= 2;
} }
pMultiMeta = realloc(pMultiMeta, *totalMallocLen); SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta == NULL) { if (pMultiMeta1 == NULL) {
return NULL; return NULL;
} }
pMultiMeta = pMultiMeta1;
} }
return pMultiMeta; return pMultiMeta;

View File

@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename);
void* taosLoadSym(void* handle, char* name); void* taosLoadSym(void* handle, char* name);
void taosCloseDll(void *handle); void taosCloseDll(void *handle);
int taosSetConsoleEcho(bool on);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) {
void taosCloseDll(void *handle) { void taosCloseDll(void *handle) {
} }
int taosSetConsoleEcho(bool on)
{
#if 0
#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL)
int err;
struct termios term;
if (tcgetattr(STDIN_FILENO, &term) == -1) {
perror("Cannot get the attribution of the terminal");
return -1;
}
if (on)
term.c_lflag|=ECHOFLAGS;
else
term.c_lflag &=~ECHOFLAGS;
err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term);
if (err == -1 && err == EINTR) {
perror("Cannot set the attribution of the terminal");
return -1;
}
#endif
return 0;
}

View File

@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
void * tptr = (void *)((char *)ptr - sizeof(size_t)); void * tptr = (void *)((char *)ptr - sizeof(size_t));
size_t tsize = size + sizeof(size_t); size_t tsize = size + sizeof(size_t);
tptr = realloc(tptr, tsize); void* tptr1 = realloc(tptr, tsize);
if (tptr == NULL) return NULL; if (tptr1 == NULL) return NULL;
tptr = tptr1;
*(size_t *)tptr = size; *(size_t *)tptr = size;

View File

@ -51,4 +51,28 @@ void taosCloseDll(void *handle) {
} }
} }
int taosSetConsoleEcho(bool on)
{
#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL)
int err;
struct termios term;
if (tcgetattr(STDIN_FILENO, &term) == -1) {
perror("Cannot get the attribution of the terminal");
return -1;
}
if (on)
term.c_lflag |= ECHOFLAGS;
else
term.c_lflag &= ~ECHOFLAGS;
err = tcsetattr(STDIN_FILENO, TCSAFLUSH, &term);
if (err == -1 || err == EINTR) {
perror("Cannot set the attribution of the terminal");
return -1;
}
return 0;
}

View File

@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
*n += MIN_CHUNK; *n += MIN_CHUNK;
nchars_avail = (int32_t)(*n + *lineptr - read_pos); nchars_avail = (int32_t)(*n + *lineptr - read_pos);
*lineptr = realloc(*lineptr, *n); char* lineptr1 = realloc(*lineptr, *n);
if (!*lineptr) { if (!lineptr1) {
errno = ENOMEM; errno = ENOMEM;
return -1; return -1;
} }
*lineptr = lineptr1;
read_pos = *n - nchars_avail + *lineptr; read_pos = *n - nchars_avail + *lineptr;
assert((*lineptr + *n) == (read_pos + nchars_avail)); assert((*lineptr + *n) == (read_pos + nchars_avail));
} }

View File

@ -30,3 +30,17 @@ void taosCloseDll(void *handle) {
} }
int taosSetConsoleEcho(bool on)
{
HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE);
DWORD mode = 0;
GetConsoleMode(hStdin, &mode );
if (on) {
mode |= ENABLE_ECHO_INPUT;
} else {
mode &= ~ENABLE_ECHO_INPUT;
}
SetConsoleMode(hStdin, mode);
return 0;
}

View File

@ -150,6 +150,7 @@ typedef struct HttpContext {
char ipstr[22]; char ipstr[22];
char user[TSDB_USER_LEN]; // parsed from auth token or login message char user[TSDB_USER_LEN]; // parsed from auth token or login message
char pass[HTTP_PASSWORD_LEN]; char pass[HTTP_PASSWORD_LEN];
char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
TAOS * taos; TAOS * taos;
void * ppContext; void * ppContext;
HttpSession *session; HttpSession *session;

View File

@ -24,7 +24,7 @@
#define REST_ROOT_URL_POS 0 #define REST_ROOT_URL_POS 0
#define REST_ACTION_URL_POS 1 #define REST_ACTION_URL_POS 1
#define REST_USER_URL_POS 2 #define REST_USER_USEDB_URL_POS 2
#define REST_PASS_URL_POS 3 #define REST_PASS_URL_POS 3
void restInitHandle(HttpServer* pServer); void restInitHandle(HttpServer* pServer);

View File

@ -62,11 +62,11 @@ void restInitHandle(HttpServer* pServer) {
bool restGetUserFromUrl(HttpContext* pContext) { bool restGetUserFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser; HttpParser* pParser = pContext->parser;
if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) { if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) {
return false; return false;
} }
tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN); tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN);
return true; return true;
} }
@ -107,6 +107,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
HttpSqlCmd* cmd = &(pContext->singleCmd); HttpSqlCmd* cmd = &(pContext->singleCmd);
cmd->nativSql = sql; cmd->nativSql = sql;
/* find if there is db_name in url */
pContext->db[0] = '\0';
HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS];
if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') &&
(sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' '))
{
snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str);
}
pContext->reqType = HTTP_REQTYPE_SINGLE_SQL; pContext->reqType = HTTP_REQTYPE_SINGLE_SQL;
if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) { if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) {
pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod; pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod;

View File

@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) {
&(pContext->taos)); &(pContext->taos));
httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user, httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
pContext->taos); pContext->taos);
if (pContext->taos != NULL) {
STscObj *pObj = pContext->taos;
pObj->from = TAOS_REQ_FROM_HTTP;
}
} else { } else {
httpExecCmd(pContext); httpExecCmd(pContext);
} }

View File

@ -43,9 +43,7 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows) #define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows)
//TODO: may need to fine tune this threshold #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData? 1 : 0)
#define QUERY_COMP_THRESHOLD (1024 * 512)
#define NEEDTO_COMPRESS_QUERY(size) ((size) > QUERY_COMP_THRESHOLD ? 1 : 0)
enum { enum {
// when query starts to execute, this status will set // when query starts to execute, this status will set
@ -623,6 +621,7 @@ int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int3
void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset); void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset);
void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows); void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows);
void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity); void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity);
void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput);
void freeParam(SQueryParam *param); void freeParam(SQueryParam *param);
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);

View File

@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
void tOrderDescDestroy(tOrderDescriptor *pDesc); void tOrderDescDestroy(tOrderDescriptor *pDesc);
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
int32_t numOfRowsToWrite, int32_t srcCapacity); int32_t numOfRowsToWrite, int32_t srcCapacity);

View File

@ -24,10 +24,10 @@ extern "C" {
extern uint32_t qDebugFlag; extern uint32_t qDebugFlag;
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0) #define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0) #define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0) #define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0) #define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0) #define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0)

View File

@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
return; return;
} }
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
*(TSKEY *)pCtx->pOutput = pCtx->startTs; *(TSKEY *)pCtx->pOutput = pCtx->startTs;
} else if (type == TSDB_FILL_NULL) { } else if (type == TSDB_FILL_NULL) {
@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
} else if (type == TSDB_FILL_SET_VALUE) { } else if (type == TSDB_FILL_SET_VALUE) {
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
} else { } else {
if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) { if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
if (type == TSDB_FILL_PREV) { if (type == TSDB_FILL_PREV) {
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
@ -3716,13 +3718,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY skey = GET_TS_DATA(pCtx, 0); TSKEY skey = GET_TS_DATA(pCtx, 0);
if (type == TSDB_FILL_PREV) { if (type == TSDB_FILL_PREV) {
if (skey > pCtx->startTs) { if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
return; return;
} }
if (pCtx->size > 1) { if (pCtx->size > 1) {
TSKEY ekey = GET_TS_DATA(pCtx, 1); TSKEY ekey = GET_TS_DATA(pCtx, 1);
if (ekey > skey && ekey <= pCtx->startTs) { if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
skey = ekey; skey = ekey;
} }
} }
@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = skey; TSKEY ekey = skey;
char* val = NULL; char* val = NULL;
if (ekey < pCtx->startTs) { if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
if (pCtx->size > 1) { if (pCtx->size > 1) {
ekey = GET_TS_DATA(pCtx, 1); ekey = GET_TS_DATA(pCtx, 1);
if (ekey < pCtx->startTs) { if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
return; return;
} }
@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
TSKEY ekey = GET_TS_DATA(pCtx, 1); TSKEY ekey = GET_TS_DATA(pCtx, 1);
// no data generated yet // no data generated yet
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) { if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
|| ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
return; return;
} }
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
char *start = GET_INPUT_DATA(pCtx, 0); char *start = GET_INPUT_DATA(pCtx, 0);
char *end = GET_INPUT_DATA(pCtx, 1); char *end = GET_INPUT_DATA(pCtx, 1);
@ -3788,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
static void interp_function(SQLFunctionCtx *pCtx) { static void interp_function(SQLFunctionCtx *pCtx) {
// at this point, the value is existed, return directly // at this point, the value is existed, return directly
if (pCtx->size > 0) { if (pCtx->size > 0) {
// impose the timestamp check bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
TSKEY key = GET_TS_DATA(pCtx, 0); TSKEY key;
char *pData;
int32_t typedData = 0;
if (ascQuery) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
key = pCtx->start.key;
if (key == INT64_MIN) {
key = GET_TS_DATA(pCtx, 0);
pData = GET_INPUT_DATA(pCtx, 0);
} else {
if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
pData = pCtx->start.ptr;
} else {
typedData = 1;
pData = (char *)&pCtx->start.val;
}
}
}
//if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
if (key == pCtx->startTs) { if (key == pCtx->startTs) {
char *pData = GET_INPUT_DATA(pCtx, 0); if (typedData) {
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
} else {
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
}
SET_VAL(pCtx, 1, 1); SET_VAL(pCtx, 1, 1);
} else { } else {
interp_function_impl(pCtx); interp_function_impl(pCtx);

View File

@ -1324,6 +1324,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
pCtx[k].end.key = curTs; pCtx[k].end.key = curTs;
pCtx[k].end.val = v2; pCtx[k].end.val = v2;
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
if (prevRowIndex == -1) {
pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
} else {
pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
}
pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
}
} }
} else if (functionId == TSDB_FUNC_TWA) { } else if (functionId == TSDB_FUNC_TWA) {
SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
@ -1593,6 +1603,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
SResultRow* pResult = NULL; SResultRow* pResult = NULL;
int32_t forwardStep = 0; int32_t forwardStep = 0;
int32_t ret = 0; int32_t ret = 0;
STimeWindow preWin = win;
while (1) { while (1) {
// null data, failed to allocate more memory buffer // null data, failed to allocate more memory buffer
@ -1607,12 +1618,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation // window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
preWin = win;
int32_t prevEndPos = (forwardStep - 1) * step + startPos; int32_t prevEndPos = (forwardStep - 1) * step + startPos;
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
if (startPos < 0) { if (startPos < 0) {
if (win.skey <= pQueryAttr->window.ekey) { if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) { if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
@ -1623,7 +1635,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
// window start(end) key interpolation // window start(end) key interpolation
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
} }
break; break;
@ -3588,7 +3600,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
// set the timestamp output buffer for top/bottom/diff query // set the timestamp output buffer for top/bottom/diff query
int32_t fid = pCtx[i].functionId; int32_t fid = pCtx[i].functionId;
if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) { if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) {
pCtx[i].ptsOutputBuf = pCtx[0].pOutput; if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
} }
} }
@ -3616,14 +3628,15 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
} }
} }
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i);
pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows; pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows;
// set the correct pointer after the memory buffer reallocated. // set the correct pointer after the memory buffer reallocated.
int32_t functionId = pBInfo->pCtx[i].functionId; int32_t functionId = pBInfo->pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE){
pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput; if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
} }
} }
} }
@ -3641,7 +3654,35 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) {
} }
} }
void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) {
bool needCopyTs = false;
int32_t tsNum = 0;
char *src = NULL;
for (int32_t i = 0; i < numOfOutput; i++) {
int32_t functionId = pCtx[i].functionId;
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
needCopyTs = true;
if (i > 0 && pCtx[i-1].functionId == TSDB_FUNC_TS_DUMMY){
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data
src = pColRes->pData;
}
}else if(functionId == TSDB_FUNC_TS_DUMMY) {
tsNum++;
}
}
if (!needCopyTs) return;
if (tsNum < 2) return;
if (src == NULL) return;
for (int32_t i = 0; i < numOfOutput; i++) {
int32_t functionId = pCtx[i].functionId;
if(functionId == TSDB_FUNC_TS_DUMMY) {
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i);
memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows);
}
}
}
void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) { void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) {
for (int32_t j = 0; j < size; ++j) { for (int32_t j = 0; j < size; ++j) {
@ -3823,7 +3864,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
} }
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx[i].ptsOutputBuf = pCtx[0].pOutput; if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
} }
if (!pResInfo->initialized) { if (!pResInfo->initialized) {
@ -3884,7 +3925,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
int32_t functionId = pCtx[i].functionId; int32_t functionId = pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
pCtx[i].ptsOutputBuf = pCtx[0].pOutput; if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
} }
/* /*
@ -5705,6 +5746,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) { if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) {
copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return pRes; return pRes;
} }
@ -5730,8 +5772,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
if (*newgroup) { if (*newgroup) {
if (pRes->info.rows > 0) { if (pRes->info.rows > 0) {
pProjectInfo->existDataBlock = pBlock; pProjectInfo->existDataBlock = pBlock;
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); break;
return pInfo->pRes;
} else { // init output buffer for a new group data } else { // init output buffer for a new group data
for (int32_t j = 0; j < pOperator->numOfOutput; ++j) { for (int32_t j = 0; j < pOperator->numOfOutput; ++j) {
aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]); aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]);
@ -5761,7 +5802,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
break; break;
} }
} }
copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
} }
@ -7225,11 +7266,13 @@ static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBl
p += strlen(MULTI_KEY_DELIM); p += strlen(MULTI_KEY_DELIM);
} }
} }
static SSDataBlock* hashDistinct(void* param, bool* newgroup) { static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param; SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
return NULL; return NULL;
} }
SDistinctOperatorInfo* pInfo = pOperator->info; SDistinctOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->pRes; SSDataBlock* pRes = pInfo->pRes;
@ -7284,11 +7327,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
pRes->info.rows += 1; pRes->info.rows += 1;
} }
} }
if (pRes->info.rows >= pInfo->threshold) { if (pRes->info.rows >= pInfo->threshold) {
break; break;
} }
} }
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL; return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
} }

View File

@ -768,60 +768,6 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
free(buf); free(buf);
} }
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
int32_t bytes = pSchema[index].bytes;
int32_t size = bytes + sizeof(int32_t);
char* buf = calloc(1, size * numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
char* dest = buf + size * i;
memcpy(dest, ((char*)pCols[index]) + bytes * i, bytes);
*(int32_t*)(dest+bytes) = i;
}
qsort(buf, numOfRows, size, compareFn);
int32_t prevLength = 0;
char* p = NULL;
for(int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes1 = pSchema[i].bytes;
if (i == index) {
for(int32_t j = 0; j < numOfRows; ++j){
char* src = buf + (j * size);
char* dest = (char*) pCols[i] + (j * bytes1);
memcpy(dest, src, bytes1);
}
} else {
// make sure memory buffer is enough
if (prevLength < bytes1) {
char *tmp = realloc(p, bytes1 * numOfRows);
assert(tmp);
p = tmp;
prevLength = bytes1;
}
memcpy(p, pCols[i], bytes1 * numOfRows);
for(int32_t j = 0; j < numOfRows; ++j){
char* dest = (char*) pCols[i] + bytes1 * j;
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
char* src = p + (newPos * bytes1);
memcpy(dest, src, bytes1);
}
}
}
tfree(buf);
tfree(p);
}
/* /*
* deep copy of sschema * deep copy of sschema
*/ */
@ -1157,3 +1103,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
destroyColumnModel(pDesc->pColumnModel); destroyColumnModel(pDesc->pColumnModel);
tfree(pDesc); tfree(pDesc);
} }
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
int32_t bytes = pSchema[index].bytes;
int32_t size = bytes + sizeof(int32_t);
char* buf = calloc(1, size * numOfRows);
for(int32_t i = 0; i < numOfRows; ++i) {
char* dest = buf + size * i;
memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
*(int32_t*)(dest+bytes) = i;
}
qsort(buf, numOfRows, size, compareFn);
int32_t prevLength = 0;
char* p = NULL;
for(int32_t i = 0; i < numOfCols; ++i) {
int32_t bytes1 = pSchema[i].bytes;
if (i == index) {
for(int32_t j = 0; j < numOfRows; ++j){
char* src = buf + (j * size);
char* dest = ((char*)pCols[i]) + (j * bytes1);
memcpy(dest, src, bytes1);
}
} else {
// make sure memory buffer is enough
if (prevLength < bytes1) {
char *tmp = realloc(p, bytes1 * numOfRows);
assert(tmp);
p = tmp;
prevLength = bytes1;
}
memcpy(p, pCols[i], bytes1 * numOfRows);
for(int32_t j = 0; j < numOfRows; ++j){
char* dest = ((char*)pCols[i]) + bytes1 * j;
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
char* src = p + (newPos * bytes1);
memcpy(dest, src, bytes1);
}
}
}
tfree(buf);
tfree(p);
}

View File

@ -698,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
} }
// fill operator // fill operator
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
op = OP_Fill; op = OP_Fill;
taosArrayPush(plan, &op); taosArrayPush(plan, &op);
} }

View File

@ -223,9 +223,12 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
static void shrinkBuffer(STSList* ptsData) { static void shrinkBuffer(STSList* ptsData) {
// shrink tmp buffer size if it consumes too many memory compared to the pre-defined size // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size
if (ptsData->allocSize >= ptsData->threshold * 2) { if (ptsData->allocSize >= ptsData->threshold * 2) {
ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
if(rawBuf) {
ptsData->rawBuf = rawBuf;
ptsData->allocSize = MEM_BUF_SIZE; ptsData->allocSize = MEM_BUF_SIZE;
} }
}
} }
static int32_t getTagAreaLength(tVariant* pa) { static int32_t getTagAreaLength(tVariant* pa) {

View File

@ -357,7 +357,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
} }
(*pRsp)->precision = htons(pQueryAttr->precision); (*pRsp)->precision = htons(pQueryAttr->precision);
(*pRsp)->compressed = (int8_t)(tsCompressColData && checkNeedToCompressQueryCol(pQInfo)); (*pRsp)->compressed = (int8_t)((tsCompressColData != -1) && checkNeedToCompressQueryCol(pQInfo));
if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) { if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) {
doDumpQueryResult(pQInfo, (*pRsp)->data, (*pRsp)->compressed, &compLen); doDumpQueryResult(pQInfo, (*pRsp)->data, (*pRsp)->compressed, &compLen);
@ -367,8 +367,12 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
if ((*pRsp)->compressed && compLen != 0) { if ((*pRsp)->compressed && compLen != 0) {
int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput; int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput;
*contLen = *contLen - pQueryAttr->resultRowSize * s + compLen + numOfCols * sizeof(int32_t); int32_t origSize = pQueryAttr->resultRowSize * s;
int32_t compSize = compLen + numOfCols * sizeof(int32_t);
*contLen = *contLen - origSize + compSize;
*pRsp = (SRetrieveTableRsp *)rpcReallocCont(*pRsp, *contLen); *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*pRsp, *contLen);
qDebug("QInfo:0x%"PRIx64" compress col data, uncompressed size:%d, compressed size:%d, ratio:%.2f",
pQInfo->qId, origSize, compSize, (float)origSize / (float)compSize);
} }
(*pRsp)->compLen = htonl(compLen); (*pRsp)->compLen = htonl(compLen);

View File

@ -18,6 +18,9 @@
#define TSDB_FS_VERSION 0 #define TSDB_FS_VERSION 0
// ================== TSDB global config
extern bool tsdbForceKeepFile;
// ================== CURRENT file header info // ================== CURRENT file header info
typedef struct { typedef struct {
uint32_t version; // Current file system version (relating to code) uint32_t version; // Current file system version (relating to code)
@ -44,6 +47,7 @@ typedef struct {
SFSStatus* cstatus; // current status SFSStatus* cstatus; // current status
SHashObj* metaCache; // meta cache SHashObj* metaCache; // meta cache
SHashObj* metaCacheComp; // meta cache for compact
bool intxn; bool intxn;
SFSStatus* nstatus; // new status SFSStatus* nstatus; // new status
} STsdbFS; } STsdbFS;

View File

@ -14,6 +14,8 @@
*/ */
#include "tsdbint.h" #include "tsdbint.h"
extern int32_t tsTsdbMetaCompactRatio;
#define TSDB_MAX_SUBBLOCKS 8 #define TSDB_MAX_SUBBLOCKS 8
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) { static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) { if (key < 0) {
@ -55,8 +57,9 @@ typedef struct {
#define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch)))
static int tsdbCommitMeta(STsdbRepo *pRepo); static int tsdbCommitMeta(STsdbRepo *pRepo);
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen); static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact);
static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid); static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid);
static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile);
static int tsdbCommitTSData(STsdbRepo *pRepo); static int tsdbCommitTSData(STsdbRepo *pRepo);
static void tsdbStartCommit(STsdbRepo *pRepo); static void tsdbStartCommit(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo, int eno); static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
@ -261,6 +264,35 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {
// =================== Commit Meta Data // =================== Commit Meta Data
static int tsdbInitCommitMetaFile(STsdbRepo *pRepo, SMFile* pMf, bool open) {
STsdbFS * pfs = REPO_FS(pRepo);
SMFile * pOMFile = pfs->cstatus->pmf;
SDiskID did;
// Create/Open a meta file or open the existing file
if (pOMFile == NULL) {
// Create a new meta file
did.level = TFS_PRIMARY_LEVEL;
did.id = TFS_PRIMARY_ID;
tsdbInitMFile(pMf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
if (open && tsdbCreateMFile(pMf, true) < 0) {
tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMf));
} else {
tsdbInitMFileEx(pMf, pOMFile);
if (open && tsdbOpenMFile(pMf, O_WRONLY) < 0) {
tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
}
return 0;
}
static int tsdbCommitMeta(STsdbRepo *pRepo) { static int tsdbCommitMeta(STsdbRepo *pRepo) {
STsdbFS * pfs = REPO_FS(pRepo); STsdbFS * pfs = REPO_FS(pRepo);
SMemTable *pMem = pRepo->imem; SMemTable *pMem = pRepo->imem;
@ -269,35 +301,26 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
SActObj * pAct = NULL; SActObj * pAct = NULL;
SActCont * pCont = NULL; SActCont * pCont = NULL;
SListNode *pNode = NULL; SListNode *pNode = NULL;
SDiskID did;
ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0); ASSERT(pOMFile != NULL || listNEles(pMem->actList) > 0);
if (listNEles(pMem->actList) <= 0) { if (listNEles(pMem->actList) <= 0) {
// no meta data to commit, just keep the old meta file // no meta data to commit, just keep the old meta file
tsdbUpdateMFile(pfs, pOMFile); tsdbUpdateMFile(pfs, pOMFile);
if (tsTsdbMetaCompactRatio > 0) {
if (tsdbInitCommitMetaFile(pRepo, &mf, false) < 0) {
return -1;
}
int ret = tsdbCompactMetaFile(pRepo, pfs, &mf);
if (ret < 0) tsdbError("compact meta file error");
return ret;
}
return 0; return 0;
} else { } else {
// Create/Open a meta file or open the existing file if (tsdbInitCommitMetaFile(pRepo, &mf, true) < 0) {
if (pOMFile == NULL) {
// Create a new meta file
did.level = TFS_PRIMARY_LEVEL;
did.id = TFS_PRIMARY_ID;
tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)));
if (tsdbCreateMFile(&mf, true) < 0) {
tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1; return -1;
} }
tsdbInfo("vgId:%d meta file %s is created to commit", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));
} else {
tsdbInitMFileEx(&mf, pOMFile);
if (tsdbOpenMFile(&mf, O_WRONLY) < 0) {
tsdbError("vgId:%d failed to open META file since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
}
} }
// Loop to write // Loop to write
@ -305,7 +328,7 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
pAct = (SActObj *)pNode->data; pAct = (SActObj *)pNode->data;
if (pAct->act == TSDB_UPDATE_META) { if (pAct->act == TSDB_UPDATE_META) {
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj)); pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) { if (tsdbUpdateMetaRecord(pfs, &mf, pAct->uid, (void *)(pCont->cont), pCont->len, false) < 0) {
tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid, tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
tstrerror(terrno)); tstrerror(terrno));
tsdbCloseMFile(&mf); tsdbCloseMFile(&mf);
@ -338,6 +361,10 @@ static int tsdbCommitMeta(STsdbRepo *pRepo) {
tsdbCloseMFile(&mf); tsdbCloseMFile(&mf);
tsdbUpdateMFile(pfs, &mf); tsdbUpdateMFile(pfs, &mf);
if (tsTsdbMetaCompactRatio > 0 && tsdbCompactMetaFile(pRepo, pfs, &mf) < 0) {
tsdbError("compact meta file error");
}
return 0; return 0;
} }
@ -375,7 +402,7 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) {
pRtn->minFid, pRtn->midFid, pRtn->maxFid); pRtn->minFid, pRtn->midFid, pRtn->maxFid);
} }
static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen) { static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void *cont, int contLen, bool compact) {
char buf[64] = "\0"; char buf[64] = "\0";
void * pBuf = buf; void * pBuf = buf;
SKVRecord rInfo; SKVRecord rInfo;
@ -401,13 +428,18 @@ static int tsdbUpdateMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid, void
} }
tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM))); tsdbUpdateMFileMagic(pMFile, POINTER_SHIFT(cont, contLen - sizeof(TSCKSUM)));
SKVRecord *pRecord = taosHashGet(pfs->metaCache, (void *)&uid, sizeof(uid));
SHashObj* cache = compact ? pfs->metaCacheComp : pfs->metaCache;
pMFile->info.nRecords++;
SKVRecord *pRecord = taosHashGet(cache, (void *)&uid, sizeof(uid));
if (pRecord != NULL) { if (pRecord != NULL) {
pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord)); pMFile->info.tombSize += (pRecord->size + sizeof(SKVRecord));
} else { } else {
pMFile->info.nRecords++; pMFile->info.nRecords++;
} }
taosHashPut(pfs->metaCache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo)); taosHashPut(cache, (void *)(&uid), sizeof(uid), (void *)(&rInfo), sizeof(rInfo));
return 0; return 0;
} }
@ -442,6 +474,129 @@ static int tsdbDropMetaRecord(STsdbFS *pfs, SMFile *pMFile, uint64_t uid) {
return 0; return 0;
} }
// Compact the TSDB meta file: when the share of deleted records
// (nDels/nRecords) or of dead bytes (tombSize/size) reaches
// tsTsdbMetaCompactRatio percent, rewrite every live record from the
// meta cache into a fresh meta file, then atomically swap it in.
// Returns 0 on success or when no compaction is needed, -1 on failure.
static int tsdbCompactMetaFile(STsdbRepo *pRepo, STsdbFS *pfs, SMFile *pMFile) {
  // Compaction trigger: either ratio crossing the configured threshold.
  float delPercent = (float)(pMFile->info.nDels) / (float)(pMFile->info.nRecords);
  float tombPercent = (float)(pMFile->info.tombSize) / (float)(pMFile->info.size);
  float compactRatio = (float)(tsTsdbMetaCompactRatio)/100;

  if (delPercent < compactRatio && tombPercent < compactRatio) {
    return 0;
  }

  if (tsdbOpenMFile(pMFile, O_RDONLY) < 0) {
    tsdbError("open meta file %s compact fail", pMFile->f.rname);
    return -1;
  }

  tsdbInfo("begin compact tsdb meta file, ratio:%d, nDels:%" PRId64 ",nRecords:%" PRId64 ",tombSize:%" PRId64 ",size:%" PRId64,
          tsTsdbMetaCompactRatio, pMFile->info.nDels,pMFile->info.nRecords,pMFile->info.tombSize,pMFile->info.size);

  SMFile mf;
  SDiskID did;

  // first create tmp meta file (next txn version, on the primary disk)
  did.level = TFS_PRIMARY_LEVEL;
  did.id = TFS_PRIMARY_ID;
  tsdbInitMFile(&mf, did, REPO_ID(pRepo), FS_TXN_VERSION(REPO_FS(pRepo)) + 1);

  if (tsdbCreateMFile(&mf, true) < 0) {
    tsdbError("vgId:%d failed to create META file since %s", REPO_ID(pRepo), tstrerror(terrno));
    return -1;
  }

  tsdbInfo("vgId:%d meta file %s is created to compact meta data", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf));

  // second iterator metaCache: copy each live record into the new file
  int code = -1;
  int64_t maxBufSize = 1024;   // read buffer, grown on demand below
  SKVRecord *pRecord;
  void *pBuf = NULL;

  pBuf = malloc((size_t)maxBufSize);
  if (pBuf == NULL) {
    goto _err;
  }

  // init Comp: shadow cache holding the compacted record offsets
  assert(pfs->metaCacheComp == NULL);
  pfs->metaCacheComp = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
  if (pfs->metaCacheComp == NULL) {
    goto _err;
  }

  pRecord = taosHashIterate(pfs->metaCache, NULL);
  while (pRecord) {
    // Record payload lives right after its SKVRecord header in the old file.
    if (tsdbSeekMFile(pMFile, pRecord->offset + sizeof(SKVRecord), SEEK_SET) < 0) {
      tsdbError("vgId:%d failed to seek file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
                tstrerror(terrno));
      goto _err;
    }
    if (pRecord->size > maxBufSize) {
      maxBufSize = pRecord->size;
      void* tmp = realloc(pBuf, (size_t)maxBufSize);
      if (tmp == NULL) {
        goto _err;
      }
      pBuf = tmp;
    }
    int nread = (int)tsdbReadMFile(pMFile, pBuf, pRecord->size);
    if (nread < 0) {
      tsdbError("vgId:%d failed to read file %s since %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile),
                tstrerror(terrno));
      goto _err;
    }

    if (nread < pRecord->size) {
      tsdbError("vgId:%d failed to read file %s since file corrupted, expected read:%" PRId64 " actual read:%d",
                REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pMFile), pRecord->size, nread);
      goto _err;
    }

    // compact==true routes the record into metaCacheComp (see caller of this path)
    if (tsdbUpdateMetaRecord(pfs, &mf, pRecord->uid, pBuf, (int)pRecord->size, true) < 0) {
      tsdbError("vgId:%d failed to update META record, uid %" PRIu64 " since %s", REPO_ID(pRepo), pRecord->uid,
                tstrerror(terrno));
      goto _err;
    }

    pRecord = taosHashIterate(pfs->metaCache, pRecord);
  }
  code = 0;

_err:
  // On success, flush the new file before it replaces the old one.
  if (code == 0) TSDB_FILE_FSYNC(&mf);
  tsdbCloseMFile(&mf);
  tsdbCloseMFile(pMFile);

  if (code == 0) {
    // rename meta.tmp -> meta
    tsdbInfo("vgId:%d meta file rename %s -> %s", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(&mf), TSDB_FILE_FULL_NAME(pMFile));
    taosRename(mf.f.aname,pMFile->f.aname);
    // mf keeps the new version number but takes over the old file's names.
    tstrncpy(mf.f.aname, pMFile->f.aname, TSDB_FILENAME_LEN);
    tstrncpy(mf.f.rname, pMFile->f.rname, TSDB_FILENAME_LEN);
    // update current meta file info
    pfs->nstatus->pmf = NULL;
    tsdbUpdateMFile(pfs, &mf);

    // Swap in the compacted offset cache; old cache is discarded.
    taosHashCleanup(pfs->metaCache);
    pfs->metaCache = pfs->metaCacheComp;
    pfs->metaCacheComp = NULL;
  } else {
    // remove meta.tmp file
    // NOTE(review): metaCacheComp may be NULL here (early malloc failure) --
    // assumes taosHashCleanup tolerates NULL; confirm against its definition.
    remove(mf.f.aname);
    taosHashCleanup(pfs->metaCacheComp);
    pfs->metaCacheComp = NULL;
  }

  tfree(pBuf);

  // Freshly written file contains only live records: no deletions, no tomb bytes.
  ASSERT(mf.info.nDels == 0);
  ASSERT(mf.info.tombSize == 0);

  tsdbInfo("end compact tsdb meta file,code:%d,nRecords:%" PRId64 ",size:%" PRId64,
          code,mf.info.nRecords,mf.info.size);
  return code;
}
// =================== Commit Time-Series Data // =================== Commit Time-Series Data
static int tsdbCommitTSData(STsdbRepo *pRepo) { static int tsdbCommitTSData(STsdbRepo *pRepo) {
SMemTable *pMem = pRepo->imem; SMemTable *pMem = pRepo->imem;

View File

@ -38,7 +38,6 @@ static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
static int tsdbCreateMeta(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo);
// For backward compatibility // For backward compatibility
bool tsdbForceKeepFile = false;
// ================== CURRENT file header info // ================== CURRENT file header info
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
int tlen = 0; int tlen = 0;
@ -217,6 +216,7 @@ STsdbFS *tsdbNewFS(STsdbCfg *pCfg) {
} }
pfs->intxn = false; pfs->intxn = false;
pfs->metaCacheComp = NULL;
pfs->nstatus = tsdbNewFSStatus(maxFSet); pfs->nstatus = tsdbNewFSStatus(maxFSet);
if (pfs->nstatus == NULL) { if (pfs->nstatus == NULL) {

View File

@ -20,7 +20,7 @@ static const char *TSDB_FNAME_SUFFIX[] = {
"data", // TSDB_FILE_DATA "data", // TSDB_FILE_DATA
"last", // TSDB_FILE_LAST "last", // TSDB_FILE_LAST
"", // TSDB_FILE_MAX "", // TSDB_FILE_MAX
"meta" // TSDB_FILE_META "meta", // TSDB_FILE_META
}; };
static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname); static void tsdbGetFilename(int vid, int fid, uint32_t ver, TSDB_FILE_T ftype, char *fname);

View File

@ -43,6 +43,7 @@ static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable); static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid); static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema); static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable);
static int tsdbAddSchema(STable *pTable, STSchema *pSchema); static int tsdbAddSchema(STable *pTable, STSchema *pSchema);
static void tsdbFreeTableSchema(STable *pTable); static void tsdbFreeTableSchema(STable *pTable);
@ -128,21 +129,16 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
tsdbUnlockRepoMeta(pRepo); tsdbUnlockRepoMeta(pRepo);
// Write to memtable action // Write to memtable action
// TODO: refactor duplicate codes
int tlen = 0;
void *pBuf = NULL;
if (newSuper || superChanged) { if (newSuper || superChanged) {
tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, super); // add insert new super table action
pBuf = tsdbAllocBytes(pRepo, tlen); if (tsdbInsertNewTableAction(pRepo, super) != 0) {
if (pBuf == NULL) goto _err; goto _err;
void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, super); }
ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen); }
// add insert new table action
if (tsdbInsertNewTableAction(pRepo, table) != 0) {
goto _err;
} }
tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, table);
pBuf = tsdbAllocBytes(pRepo, tlen);
if (pBuf == NULL) goto _err;
void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, table);
ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
if (tsdbCheckCommit(pRepo) < 0) return -1; if (tsdbCheckCommit(pRepo) < 0) return -1;
@ -383,7 +379,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
tdDestroyTSchemaBuilder(&schemaBuilder); tdDestroyTSchemaBuilder(&schemaBuilder);
} }
// Chage in memory // Change in memory
if (pNewSchema != NULL) { // change super table tag schema if (pNewSchema != NULL) { // change super table tag schema
TSDB_WLOCK_TABLE(pTable->pSuper); TSDB_WLOCK_TABLE(pTable->pSuper);
STSchema *pOldSchema = pTable->pSuper->tagSchema; STSchema *pOldSchema = pTable->pSuper->tagSchema;
@ -426,6 +422,21 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
} }
// ------------------ INTERNAL FUNCTIONS ------------------ // ------------------ INTERNAL FUNCTIONS ------------------
static int tsdbInsertNewTableAction(STsdbRepo *pRepo, STable* pTable) {
int tlen = 0;
void *pBuf = NULL;
tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pTable);
pBuf = tsdbAllocBytes(pRepo, tlen);
if (pBuf == NULL) {
return -1;
}
void *tBuf = tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, pBuf, pTable);
ASSERT(POINTER_DISTANCE(tBuf, pBuf) == tlen);
return 0;
}
STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) { STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
STsdbMeta *pMeta = (STsdbMeta *)calloc(1, sizeof(*pMeta)); STsdbMeta *pMeta = (STsdbMeta *)calloc(1, sizeof(*pMeta));
if (pMeta == NULL) { if (pMeta == NULL) {
@ -617,6 +628,7 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
if (pTable->lastCols == NULL) { if (pTable->lastCols == NULL) {
return -1; return -1;
} }
// TODO: use binary search instead
for (int16_t i = 0; i < pTable->maxColNum; ++i) { for (int16_t i = 0; i < pTable->maxColNum; ++i) {
if (pTable->lastCols[i].colId == colId) { if (pTable->lastCols[i].colId == colId) {
return i; return i;
@ -734,10 +746,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
TSDB_WUNLOCK_TABLE(pCTable); TSDB_WUNLOCK_TABLE(pCTable);
if (insertAct) { if (insertAct) {
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable); if (tsdbInsertNewTableAction(pRepo, pCTable) != 0) {
void *buf = tsdbAllocBytes(pRepo, tlen); tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " tsdbInsertNewTableAction fail", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
ASSERT(buf != NULL); TABLE_TID(pTable), TABLE_UID(pTable));
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pCTable); }
} }
} }
@ -1250,8 +1262,14 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable)); tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
tlen += tdEncodeKVRow(buf, pTable->tagVal); tlen += tdEncodeKVRow(buf, pTable->tagVal);
} else { } else {
tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema)); uint32_t arraySize = (uint32_t)taosArrayGetSize(pTable->schema);
for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) { if(arraySize > UINT8_MAX) {
tlen += taosEncodeFixedU8(buf, 0);
tlen += taosEncodeFixedU32(buf, arraySize);
} else {
tlen += taosEncodeFixedU8(buf, (uint8_t)arraySize);
}
for (uint32_t i = 0; i < arraySize; i++) {
STSchema *pSchema = taosArrayGetP(pTable->schema, i); STSchema *pSchema = taosArrayGetP(pTable->schema, i);
tlen += tdEncodeSchema(buf, pSchema); tlen += tdEncodeSchema(buf, pSchema);
} }
@ -1284,8 +1302,11 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable)); buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
buf = tdDecodeKVRow(buf, &(pTable->tagVal)); buf = tdDecodeKVRow(buf, &(pTable->tagVal));
} else { } else {
uint8_t nSchemas; uint32_t nSchemas = 0;
buf = taosDecodeFixedU8(buf, &nSchemas); buf = taosDecodeFixedU8(buf, (uint8_t *)&nSchemas);
if(nSchemas == 0) {
buf = taosDecodeFixedU32(buf, &nSchemas);
}
for (int i = 0; i < nSchemas; i++) { for (int i = 0; i < nSchemas; i++) {
STSchema *pSchema; STSchema *pSchema;
buf = tdDecodeSchema(buf, &pSchema); buf = tdDecodeSchema(buf, &pSchema);

View File

@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int32_t numOfColsOfRow1 = 0; int32_t numOfColsOfRow1 = 0;
if (pSchema1 == NULL) { if (pSchema1 == NULL) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1)); pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
} }
if(isRow1DataRow) { if(isRow1DataRow) {
numOfColsOfRow1 = schemaNCols(pSchema1); numOfColsOfRow1 = schemaNCols(pSchema1);
@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
if(row2) { if(row2) {
isRow2DataRow = isDataRow(row2); isRow2DataRow = isDataRow(row2);
if (pSchema2 == NULL) { if (pSchema2 == NULL) {
pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2)); pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
} }
if(isRow2DataRow) { if(isRow2DataRow) {
numOfColsOfRow2 = schemaNCols(pSchema2); numOfColsOfRow2 = schemaNCols(pSchema2);
@ -3480,6 +3480,7 @@ void filterPrepare(void* expr, void* param) {
SArray *arr = (SArray *)(pCond->arr); SArray *arr = (SArray *)(pCond->arr);
for (size_t i = 0; i < taosArrayGetSize(arr); i++) { for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
char* p = taosArrayGetP(arr, i); char* p = taosArrayGetP(arr, i);
strtolower(varDataVal(p), varDataVal(p));
taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy)); taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy));
} }
} else { } else {

View File

@ -25,7 +25,7 @@ extern "C" {
#define TSDB_PATTERN_MATCH 0 #define TSDB_PATTERN_MATCH 0
#define TSDB_PATTERN_NOMATCH 1 #define TSDB_PATTERN_NOMATCH 1
#define TSDB_PATTERN_NOWILDCARDMATCH 2 #define TSDB_PATTERN_NOWILDCARDMATCH 2
#define TSDB_PATTERN_STRING_MAX_LEN 100 #define TSDB_PATTERN_STRING_DEFAULT_LEN 100
#define FLT_COMPAR_TOL_FACTOR 4 #define FLT_COMPAR_TOL_FACTOR 4
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))

View File

@ -81,7 +81,6 @@ typedef struct {
extern SGlobalCfg tsGlobalConfig[]; extern SGlobalCfg tsGlobalConfig[];
extern int32_t tsGlobalConfigNum; extern int32_t tsGlobalConfigNum;
extern char * tsCfgStatusStr[]; extern char * tsCfgStatusStr[];
extern bool tsdbForceKeepFile;
void taosReadGlobalLogCfg(); void taosReadGlobalLogCfg();
bool taosReadGlobalCfg(); bool taosReadGlobalCfg();

View File

@ -199,16 +199,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) {
if (len1 != len2) { if (len1 != len2) {
return len1 > len2? 1:-1; return len1 > len2? 1:-1;
} else { } else {
char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1);
char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char));
memcpy(pLeftTerm, varDataVal(pLeft), len1);
memcpy(pRightTerm, varDataVal(pRight), len2);
int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE);
tfree(pLeftTerm);
tfree(pRightTerm);
if (ret == 0) { if (ret == 0) {
return 0; return 0;
} else { } else {
@ -271,6 +262,7 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
c1 = str[j++]; c1 = str[j++];
if (j <= size) { if (j <= size) {
if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
continue; continue;
} }
@ -367,12 +359,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'}; SPatternCompareInfo pInfo = {'%', '_'};
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern); free(pattern);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
} }
@ -517,17 +510,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
if (t1->len != t2->len) { if (t1->len != t2->len) {
return t1->len > t2->len? 1:-1; return t1->len > t2->len? 1:-1;
} }
int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len);
char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char));
char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char));
memcpy(t1_term, t1->data, t1->len);
memcpy(t2_term, t2->data, t2->len);
int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE);
tfree(t1_term);
tfree(t2_term);
if (ret == 0) { if (ret == 0) {
return ret; return ret;
} }

View File

@ -14,23 +14,24 @@
*/ */
#include "tfunctional.h" #include "tfunctional.h"
#include "tarray.h"
tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) { tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int numOfArgs) {
tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*))); tGenericSavedFunc* pSavedFunc = malloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func; pSavedFunc->func = func;
return pSavedFunc; return pSavedFunc;
} }
tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) { tI32SavedFunc* i32SavedFuncInit(I32VaFunc func, int numOfArgs) {
tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *)); tI32SavedFunc* pSavedFunc = malloc(sizeof(tI32SavedFunc) + numOfArgs * sizeof(void *));
if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func; pSavedFunc->func = func;
return pSavedFunc; return pSavedFunc;
} }
tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) { tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int numOfArgs) {
tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*)); tVoidSavedFunc* pSavedFunc = malloc(sizeof(tVoidSavedFunc) + numOfArgs * sizeof(void*));
if(pSavedFunc == NULL) return NULL;
pSavedFunc->func = func; pSavedFunc->func = func;
return pSavedFunc; return pSavedFunc;
} }

View File

@ -70,7 +70,7 @@ void doubleSkipListTest() {
} }
void randKeyTest() { void randKeyTest() {
SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
false, getkey); false, getkey);
int32_t size = 200000; int32_t size = 200000;

View File

@ -540,7 +540,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
pWal->version = pHead->version; pWal->version = pHead->version;
//wInfo("writeFp: %ld", offset); // wInfo("writeFp: %ld", offset);
if (0 != walSMemRowCheck(pHead)) { if (0 != walSMemRowCheck(pHead)) {
wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);

View File

@ -0,0 +1,429 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 2048
#define REQ_CLI_COUNT 100
/* Life-cycle states of one client connection. */
typedef enum
{
    uninited,    /* socket not yet connecting */
    connecting,  /* non-blocking connect() in flight (EINPROGRESS) */
    connected,   /* TCP connection established */
    datasent     /* full HTTP request has been written */
} conn_stat;

/* Home-grown boolean.
 * NOTE(review): clashes with <stdbool.h> and with C23 (where bool/true/false
 * are keywords) -- acceptable only while neither is in use here. */
typedef enum
{
    false,
    true
} bool;

typedef unsigned short u16_t;  /* TCP port width */
typedef unsigned int u32_t;    /* epoll event-mask width */

/* Per-connection bookkeeping carried through the epoll loop via data.ptr. */
typedef struct
{
    int sockfd;                    /* socket descriptor, -1 when closed */
    int index;                     /* position in the ctx[] array */
    conn_stat state;               /* current connection state */
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* response status line was not 200 */
    bool success;                  /* response status line was 200 */
    struct sockaddr_in serv_addr;  /* server address to connect to */
} socket_ctx;
/* Put the descriptor into non-blocking mode.
 * Returns the fcntl(F_SETFL) result: 0 on success, -1 on failure. */
int set_nonblocking(int sockfd)
{
    int flags = fcntl(sockfd, F_GETFL);
    int rc = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (rc != -1) {
        return rc;
    }
    printf("failed to fcntl for %d\r\n", sockfd);
    return rc;
}
/* Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * On success returns the new descriptor (also stored in pctx->sockfd);
 * on failure returns -1 with pctx->sockfd set to -1 (no fd leaked). */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        /* fix: close the freshly created fd instead of leaking it */
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        /* fix: same leak on this error path */
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket in the first cnt entries of pctx[].
 * Closed slots are marked with -1; a NULL array is a no-op. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }

    for (int k = 0; k < cnt; k++) {
        if (pctx[k].sockfd <= 0) {
            continue;
        }
        close(pctx[k].sockfd);
        pctx[k].sockfd = -1;
    }
}
/* Resolve the outcome of a deferred non-blocking connect() by reading
 * SO_ERROR. On failure the socket is closed and -1 is returned;
 * otherwise (including ctx == NULL) returns 0. */
int proc_pending_error(socket_ctx *ctx)
{
    if (ctx == NULL) {
        return 0;
    }

    int err = 0;
    socklen_t len = sizeof(int);
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }

    if (!err) {
        return 0;
    }

    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/* Compose a minimal HTTP/1.1 POST carrying `sql` as the body, with a
 * hard-coded Basic-auth header, into req_buf (at most len bytes,
 * NUL-terminated). Invalid arguments leave req_buf untouched. */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* fix: strlen() returns size_t, so the matching specifier is %zu;
     * the previous %ld is undefined behavior on LLP64 platforms */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with the epoll instance for the given event mask,
 * attaching `data` as the per-fd cookie. Returns epoll_ctl()'s result. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;
    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and cookie) of an fd already registered with
 * the epoll instance. Returns epoll_ctl()'s result. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;
    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Unregister sockfd from the epoll instance. A zeroed event struct is
 * passed for pre-2.6.9 kernel compatibility. Returns epoll_ctl()'s result. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = {0};
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress driver: opens REQ_CLI_COUNT non-blocking TCP connections to a
 * local TDengine RESTful endpoint and issues one "create database"
 * statement per connection, multiplexing all sockets with edge-triggered
 * epoll. Per-connection failures are logged only; always returns 0.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd;
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url = "/rest/sql";
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* a write to a reset connection must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);

    /* pre-build one HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;

        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket ar %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd at %d to epoll\r\n", i);
            goto failed;
        }
    }

    /* kick off all connects; EINPROGRESS is the normal non-blocking path */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    /* event loop until every connection finished or failed.
     * NOTE(review): timeout 0 makes this a busy poll -- intentional? */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }

                    /* edge-triggered: drain until EAGAIN */
                    for ( ;; ) {
                        /* fix: bound the read by the space left in recv_buf
                         * (reserving one byte for the NUL the later "%s"
                         * print relies on). The old code always passed
                         * RECV_MAX_LINE, overflowing the buffer once a
                         * response arrived in more than one chunk. A full
                         * buffer yields a 0-byte read and drops the
                         * connection, like a peer close. */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv,
                                     RECV_MAX_LINE - 1 - pctx->nrecv, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        /* 12 bytes cover "HTTP/1.1 NNN"; status code sits at offset 9 */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }

                    /* edge-triggered: push bytes until done or EAGAIN */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request fully sent: only wait for the response now */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}

View File

@ -0,0 +1,433 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 2048
#define REQ_CLI_COUNT 100
/* Life-cycle states of one client connection. */
typedef enum
{
    uninited,    /* socket not yet connecting */
    connecting,  /* non-blocking connect() in flight (EINPROGRESS) */
    connected,   /* TCP connection established */
    datasent     /* full HTTP request has been written */
} conn_stat;

/* Home-grown boolean.
 * NOTE(review): clashes with <stdbool.h> and with C23 (where bool/true/false
 * are keywords) -- acceptable only while neither is in use here. */
typedef enum
{
    false,
    true
} bool;

typedef unsigned short u16_t;  /* TCP port width */
typedef unsigned int u32_t;    /* epoll event-mask width */

/* Per-connection bookkeeping carried through the epoll loop via data.ptr. */
typedef struct
{
    int sockfd;                    /* socket descriptor, -1 when closed */
    int index;                     /* position in the ctx[] array */
    conn_stat state;               /* current connection state */
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* response status line was not 200 */
    bool success;                  /* response status line was 200 */
    struct sockaddr_in serv_addr;  /* server address to connect to */
} socket_ctx;
/* Put the descriptor into non-blocking mode.
 * Returns the fcntl(F_SETFL) result: 0 on success, -1 on failure. */
int set_nonblocking(int sockfd)
{
    int flags = fcntl(sockfd, F_GETFL);
    int rc = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (rc != -1) {
        return rc;
    }
    printf("failed to fcntl for %d\r\n", sockfd);
    return rc;
}
/* Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * On success returns the new descriptor (also stored in pctx->sockfd);
 * on failure returns -1 with pctx->sockfd set to -1 (no fd leaked). */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        /* fix: close the freshly created fd instead of leaking it */
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        /* fix: same leak on this error path */
        close(pctx->sockfd);
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket in the first cnt entries of pctx[].
 * Closed slots are marked with -1; a NULL array is a no-op. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }

    for (int k = 0; k < cnt; k++) {
        if (pctx[k].sockfd <= 0) {
            continue;
        }
        close(pctx[k].sockfd);
        pctx[k].sockfd = -1;
    }
}
/* Resolve the outcome of a deferred non-blocking connect() by reading
 * SO_ERROR. On failure the socket is closed and -1 is returned;
 * otherwise (including ctx == NULL) returns 0. */
int proc_pending_error(socket_ctx *ctx)
{
    if (ctx == NULL) {
        return 0;
    }

    int err = 0;
    socklen_t len = sizeof(int);
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len) == -1) {
        err = errno;
    }

    if (!err) {
        return 0;
    }

    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/* Compose a minimal HTTP/1.1 POST carrying `sql` as the body, with a
 * hard-coded Basic-auth header, into req_buf (at most len bytes,
 * NUL-terminated). Invalid arguments leave req_buf untouched. */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* fix: strlen() returns size_t, so the matching specifier is %zu;
     * the previous %ld is undefined behavior on LLP64 platforms */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register sockfd with the epoll instance for the given event mask,
 * attaching `data` as the per-fd cookie. Returns epoll_ctl()'s result. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;
    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and cookie) of an fd already registered with
 * the epoll instance. Returns epoll_ctl()'s result. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;
    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Unregister sockfd from the epoll instance. A zeroed event struct is
 * passed for pre-2.6.9 kernel compatibility. Returns epoll_ctl()'s result. */
int del_event(int epfd, int sockfd)
{
    struct epoll_event ev = {0};
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &ev);
}
/*
 * Stress client: opens REQ_CLI_COUNT non-blocking TCP connections to the
 * local REST server (127.0.0.1:6041) and, per connection i, POSTs a
 * "create table if not exists tb<i>" statement to /rest/sql/db<i>,
 * driving all connections from a single edge-triggered epoll loop.
 * Always returns 0; failures are reported on stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* initialized so the cleanup path is safe */
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* A peer closing mid-send would otherwise kill the process. */
    signal(SIGPIPE, SIG_IGN);

    /* Pre-build one HTTP request per client. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    /* Kick off all connects; `count` tracks connections still alive. */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);

    /* Event loop until every connection is torn down.
     * NOTE(review): the zero timeout makes this a busy-poll loop. */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    /* close() removes the fd from epoll automatically */
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: drain the socket until EAGAIN. */
                    for ( ;; ) {
                        /* Bug fix: cap the read by the space left in the
                         * buffer (one byte reserved so recv_buf stays
                         * NUL-terminated for the printf below).  Passing
                         * RECV_MAX_LINE unconditionally could overflow
                         * recv_buf once nrecv > 0. */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv - 1, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* Once the status line is in, check "HTTP/1.1 200"
                         * (the status code starts at offset 9). */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: push until done or EAGAIN. */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            /* Whole request out: stop watching EPOLLOUT. */
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,433 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048   /* response buffer size per client */
#define ITEM_MAX_LINE 128    /* max length of one request-header line */
#define REQ_MAX_LINE 2048    /* max length of the SQL payload */
#define REQ_CLI_COUNT 100    /* number of concurrent client connections */
/* Lifecycle states of one client connection. */
typedef enum
{
    uninited,     /* socket not created or connect not yet attempted */
    connecting,   /* non-blocking connect() in progress (EINPROGRESS) */
    connected,    /* TCP connection established */
    datasent      /* full HTTP request has been written */
} conn_stat;
/* Hand-rolled boolean (false == 0, true == 1).
 * NOTE(review): would clash with <stdbool.h>/C23 `bool`; fine here since
 * this file does not include <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
/* Per-connection bookkeeping for the epoll event loop. */
typedef struct
{
    int sockfd;                    /* socket fd, or -1 once closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* HTTP status line was not 200 */
    bool success;                  /* HTTP status line was 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
/*
 * Put `sockfd` into non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;

    /* Bug fix: check F_GETFL separately.  On failure it returns -1, and
     * OR-ing -1 into F_SETFL would try to set every flag bit instead of
     * reporting the error. */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the socket fd (also stored in pctx->sockfd) on success, -1 on
 * failure.  Bug fix: on failures after socket() succeeded, the fd is now
 * closed and pctx->sockfd reset to -1 instead of being leaked.
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket (fd > 0) in the `cnt`-element ctx array
 * and mark each slot -1.  NULL array is tolerated. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }
    for (int idx = 0; idx < cnt; idx++) {
        socket_ctx *cur = &pctx[idx];
        if (cur->sockfd <= 0) {
            continue;
        }
        close(cur->sockfd);
        cur->sockfd = -1;
    }
}
/* After a non-blocking connect() becomes writable/readable, query
 * SO_ERROR to learn whether it actually succeeded.  Returns 0 when the
 * connection is healthy (or ctx is NULL); on error closes the socket,
 * sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int so_err = 0;
    socklen_t optlen = sizeof(so_err);

    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&so_err, &optlen) == -1) {
        /* getsockopt itself failed: treat errno as the pending error */
        so_err = errno;
    }
    if (so_err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/*
 * Build a complete HTTP/1.1 POST request that submits `sql` to the REST
 * endpoint `url` on `ip`:`port`, writing the result into `req_buf`
 * (at most `len` bytes, always NUL-terminated by snprintf).
 *
 * Silently writes nothing when any argument is missing or empty; callers
 * detect this via strlen(req_buf) == 0.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    /* hard-coded "root:taosdata" credentials, base64-encoded */
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* Bug fix: strlen() yields size_t, so the matching specifier is %zu.
     * The old %ld is undefined behavior on platforms where size_t and
     * long differ in width (e.g. 64-bit Windows). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register `sockfd` with the epoll instance `epfd` for `events`; `data`
 * comes back in epoll_event.data.ptr on every wakeup.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and user pointer) of an fd already registered
 * with `epfd`.  Returns epoll_ctl's result: 0 on success, -1 on failure. */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Unregister `sockfd` from the epoll instance `epfd`.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int del_event(int epfd, int sockfd)
{
    /* Kernels before 2.6.9 require a non-NULL event argument for
     * EPOLL_CTL_DEL even though its contents are ignored. */
    struct epoll_event unused = {
        .events = 0,
        .data   = { .ptr = NULL }
    };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
/*
 * Stress client: opens REQ_CLI_COUNT non-blocking TCP connections to the
 * local REST server (127.0.0.1:6041) and, per connection i, POSTs a
 * "drop database if exists db<i>" statement to /rest/sql/db<i>, driving
 * all connections from a single edge-triggered epoll loop.
 * Always returns 0; failures are reported on stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* initialized so the cleanup path is safe */
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* A peer closing mid-send would otherwise kill the process. */
    signal(SIGPIPE, SIG_IGN);

    /* Pre-build one HTTP request per client. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    /* Kick off all connects; `count` tracks connections still alive. */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);

    /* Event loop until every connection is torn down.
     * NOTE(review): the zero timeout makes this a busy-poll loop. */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    /* close() removes the fd from epoll automatically */
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: drain the socket until EAGAIN. */
                    for ( ;; ) {
                        /* Bug fix: cap the read by the space left in the
                         * buffer (one byte reserved so recv_buf stays
                         * NUL-terminated for the printf below).  Passing
                         * RECV_MAX_LINE unconditionally could overflow
                         * recv_buf once nrecv > 0. */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv - 1, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* Once the status line is in, check "HTTP/1.1 200"
                         * (the status code starts at offset 9). */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: push until done or EAGAIN. */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            /* Whole request out: stop watching EPOLLOUT. */
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,455 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048   /* response buffer size per client */
#define ITEM_MAX_LINE 128    /* max length of one request-header line */
#define REQ_MAX_LINE 4096    /* max length of the SQL payload */
#define REQ_CLI_COUNT 100    /* number of concurrent client connections */
/* Lifecycle states of one client connection. */
typedef enum
{
    uninited,     /* socket not created or connect not yet attempted */
    connecting,   /* non-blocking connect() in progress (EINPROGRESS) */
    connected,    /* TCP connection established */
    datasent      /* full HTTP request has been written */
} conn_stat;
/* Hand-rolled boolean (false == 0, true == 1).
 * NOTE(review): would clash with <stdbool.h>/C23 `bool`; fine here since
 * this file does not include <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;
/* Per-connection bookkeeping for the epoll event loop. */
typedef struct
{
    int sockfd;                    /* socket fd, or -1 once closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* HTTP status line was not 200 */
    bool success;                  /* HTTP status line was 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
/*
 * Put `sockfd` into non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;

    /* Bug fix: check F_GETFL separately.  On failure it returns -1, and
     * OR-ing -1 into F_SETFL would try to set every flag bit instead of
     * reporting the error. */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the socket fd (also stored in pctx->sockfd) on success, -1 on
 * failure.  Bug fix: on failures after socket() succeeded, the fd is now
 * closed and pctx->sockfd reset to -1 instead of being leaked.
 */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket (fd > 0) in the `cnt`-element ctx array
 * and mark each slot -1.  NULL array is tolerated. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }
    for (int idx = 0; idx < cnt; idx++) {
        socket_ctx *cur = &pctx[idx];
        if (cur->sockfd <= 0) {
            continue;
        }
        close(cur->sockfd);
        cur->sockfd = -1;
    }
}
/* After a non-blocking connect() becomes writable/readable, query
 * SO_ERROR to learn whether it actually succeeded.  Returns 0 when the
 * connection is healthy (or ctx is NULL); on error closes the socket,
 * sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int so_err = 0;
    socklen_t optlen = sizeof(so_err);

    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&so_err, &optlen) == -1) {
        /* getsockopt itself failed: treat errno as the pending error */
        so_err = errno;
    }
    if (so_err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/* Fallback so this helper stays self-contained; a no-op in this file,
 * where the macro is defined at the top. */
#ifndef ITEM_MAX_LINE
#define ITEM_MAX_LINE 128
#endif
/*
 * Build a complete HTTP/1.1 POST request that submits `sql` to the REST
 * endpoint `url` on `ip`:`port`, writing the result into `req_buf`
 * (at most `len` bytes, always NUL-terminated by snprintf).
 *
 * Silently writes nothing when any argument is missing or empty; callers
 * detect this via strlen(req_buf) == 0.
 */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    /* hard-coded "root:taosdata" credentials, base64-encoded */
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* Bug fix: strlen() yields size_t, so the matching specifier is %zu.
     * The old %ld is undefined behavior on platforms where size_t and
     * long differ in width (e.g. 64-bit Windows). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register `sockfd` with the epoll instance `epfd` for `events`; `data`
 * comes back in epoll_event.data.ptr on every wakeup.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and user pointer) of an fd already registered
 * with `epfd`.  Returns epoll_ctl's result: 0 on success, -1 on failure. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Unregister `sockfd` from the epoll instance `epfd`.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int del_event(int epfd, int sockfd)
{
    /* Kernels before 2.6.9 require a non-NULL event argument for
     * EPOLL_CTL_DEL even though its contents are ignored. */
    struct epoll_event unused = {
        .events = 0,
        .data   = { .ptr = NULL }
    };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
/*
 * Stress client: opens REQ_CLI_COUNT non-blocking TCP connections to the
 * local REST server (127.0.0.1:6041) and, per connection i, POSTs a batch
 * "insert into tb<i> values ..." statement to /rest/sql/db<i>, driving
 * all connections from a single edge-triggered epoll loop.
 * Always returns 0; failures are reported on stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv, offset;
    int epfd = -1;   /* Bug fix: a goto failed during SQL building used to
                      * read `epfd` uninitialized in the cleanup path. */
    uint32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    uint16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    struct timeval now;
    int64_t start_time;
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* A peer closing mid-send would otherwise kill the process. */
    signal(SIGPIPE, SIG_IGN);

    gettimeofday(&now, NULL);
    /* Bug fix: widen before multiplying; on 32-bit time_t the product
     * would overflow before being stored into int64_t. */
    start_time = (int64_t)now.tv_sec * 1000000 + now.tv_usec;

    /* Pre-build one HTTP request per client. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        offset = 0;
        ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
        if (ret <= 0) {
            printf("failed to snprintf for sql(prefix), index: %d\r\n ", i);
            goto failed;
        }
        offset += ret;
        /* Pack value tuples until ~128 bytes of headroom remain.
         * NOTE(review): every tuple reuses the same timestamp
         * start_time + i — confirm duplicate timestamps are intended. */
        while (offset < REQ_MAX_LINE - 128) {
            ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
            if (ret <= 0) {
                printf("failed to snprintf for sql(values), index: %d\r\n ", i);
                goto failed;
            }
            offset += ret;
        }
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    /* Kick off all connects; `count` tracks connections still alive. */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);

    /* Event loop until every connection is torn down.
     * NOTE(review): the zero timeout makes this a busy-poll loop. */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    /* close() removes the fd from epoll automatically */
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: drain the socket until EAGAIN. */
                    for ( ;; ) {
                        /* Bug fix: cap the read by the space left in the
                         * buffer (one byte reserved so recv_buf stays
                         * NUL-terminated for the printf below).  Passing
                         * RECV_MAX_LINE unconditionally could overflow
                         * recv_buf once nrecv > 0. */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv - 1, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* Once the status line is in, check "HTTP/1.1 200"
                         * (the status code starts at offset 9). */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: push until done or EAGAIN. */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            /* Whole request out: stop watching EPOLLOUT. */
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,432 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048   /* response buffer size per client */
#define ITEM_MAX_LINE 128    /* max length of one request-header line */
#define REQ_MAX_LINE 4096    /* max length of the SQL payload */
#define REQ_CLI_COUNT 100    /* number of concurrent client connections */
/* Lifecycle states of one client connection. */
typedef enum
{
    uninited,     /* socket not created or connect not yet attempted */
    connecting,   /* non-blocking connect() in progress (EINPROGRESS) */
    connected,    /* TCP connection established */
    datasent      /* full HTTP request has been written */
} conn_stat;
/* Hand-rolled boolean (false == 0, true == 1).
 * NOTE(review): would clash with <stdbool.h>/C23 `bool`; fine here since
 * this file does not include <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;
/* Per-connection bookkeeping for the epoll event loop. */
typedef struct
{
    int sockfd;                    /* socket fd, or -1 once closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* HTTP status line was not 200 */
    bool success;                  /* HTTP status line was 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
/*
 * Put `sockfd` into non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;

    /* Bug fix: check F_GETFL separately.  On failure it returns -1, and
     * OR-ing -1 into F_SETFL would try to set every flag bit instead of
     * reporting the error. */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the socket fd (also stored in pctx->sockfd) on success, -1 on
 * failure.  Bug fix: on failures after socket() succeeded, the fd is now
 * closed and pctx->sockfd reset to -1 instead of being leaked.
 */
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/* Close every still-open socket (fd > 0) in the `cnt`-element ctx array
 * and mark each slot -1.  NULL array is tolerated. */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }
    for (int idx = 0; idx < cnt; idx++) {
        socket_ctx *cur = &pctx[idx];
        if (cur->sockfd <= 0) {
            continue;
        }
        close(cur->sockfd);
        cur->sockfd = -1;
    }
}
/* After a non-blocking connect() becomes writable/readable, query
 * SO_ERROR to learn whether it actually succeeded.  Returns 0 when the
 * connection is healthy (or ctx is NULL); on error closes the socket,
 * sets ctx->sockfd = -1 and returns -1. */
int proc_pending_error(socket_ctx *ctx)
{
    int so_err = 0;
    socklen_t optlen = sizeof(so_err);

    if (ctx == NULL) {
        return 0;
    }
    if (getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&so_err, &optlen) == -1) {
        /* getsockopt itself failed: treat errno as the pending error */
        so_err = errno;
    }
    if (so_err == 0) {
        return 0;
    }
    printf("failed to connect at index: %d\r\n", ctx->index);
    close(ctx->sockfd);
    ctx->sockfd = -1;
    return -1;
}
/* Fallback so this helper stays self-contained; a no-op in this file,
 * where the macro is defined at the top. */
#ifndef ITEM_MAX_LINE
#define ITEM_MAX_LINE 128
#endif
/*
 * Build a complete HTTP/1.1 POST request that submits `sql` to the REST
 * endpoint `url` on `ip`:`port`, writing the result into `req_buf`
 * (at most `len` bytes, always NUL-terminated by snprintf).
 *
 * Silently writes nothing when any argument is missing or empty; callers
 * detect this via strlen(req_buf) == 0.
 */
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    /* hard-coded "root:taosdata" credentials, base64-encoded */
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* Bug fix: strlen() yields size_t, so the matching specifier is %zu.
     * The old %ld is undefined behavior on platforms where size_t and
     * long differ in width (e.g. 64-bit Windows). */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/* Register `sockfd` with the epoll instance `epfd` for `events`; `data`
 * comes back in epoll_event.data.ptr on every wakeup.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/* Change the event mask (and user pointer) of an fd already registered
 * with `epfd`.  Returns epoll_ctl's result: 0 on success, -1 on failure. */
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event ev = {
        .events = events,
        .data   = { .ptr = data }
    };
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* Unregister `sockfd` from the epoll instance `epfd`.
 * Returns epoll_ctl's result: 0 on success, -1 on failure. */
int del_event(int epfd, int sockfd)
{
    /* Kernels before 2.6.9 require a non-NULL event argument for
     * EPOLL_CTL_DEL even though its contents are ignored. */
    struct epoll_event unused = {
        .events = 0,
        .data   = { .ptr = NULL }
    };
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
/*
 * Stress client: opens REQ_CLI_COUNT non-blocking TCP connections to the
 * local REST server (127.0.0.1:6041) and, per connection i, POSTs a
 * "select count(*) from tb<i>" statement to /rest/sql/db<i>, driving all
 * connections from a single edge-triggered epoll loop.
 * Always returns 0; failures are reported on stdout.
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd = -1;   /* initialized so the cleanup path is safe */
    uint32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    uint16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* A peer closing mid-send would otherwise kill the process. */
    signal(SIGPIPE, SIG_IGN);

    /* Pre-build one HTTP request per client. */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
        snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    /* Kick off all connects; `count` tracks connections still alive. */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);

    /* Event loop until every connection is torn down (2 ms poll). */
    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    /* close() removes the fd from epoll automatically */
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: drain the socket until EAGAIN. */
                    for ( ;; ) {
                        /* Bug fix: cap the read by the space left in the
                         * buffer (one byte reserved so recv_buf stays
                         * NUL-terminated for the printf below).  Passing
                         * RECV_MAX_LINE unconditionally could overflow
                         * recv_buf once nrecv > 0. */
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE - pctx->nrecv - 1, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* Once the status line is in, check "HTTP/1.1 200"
                         * (the status code starts at offset 9). */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* Edge-triggered: push until done or EAGAIN. */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            /* Whole request out: stop watching EPOLLOUT. */
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -0,0 +1,430 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>
#define RECV_MAX_LINE 2048   /* response buffer size per client */
#define ITEM_MAX_LINE 128    /* max length of one request-header line */
#define REQ_MAX_LINE 2048    /* max length of the SQL payload */
#define REQ_CLI_COUNT 100    /* number of concurrent client connections */
/* Lifecycle states of one client connection. */
typedef enum
{
    uninited,     /* socket not created or connect not yet attempted */
    connecting,   /* non-blocking connect() in progress (EINPROGRESS) */
    connected,    /* TCP connection established */
    datasent      /* full HTTP request has been written */
} conn_stat;
/* Hand-rolled boolean (false == 0, true == 1).
 * NOTE(review): would clash with <stdbool.h>/C23 `bool`; fine here since
 * this file does not include <stdbool.h>. */
typedef enum
{
    false,
    true
} bool;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
/* Per-connection bookkeeping for the epoll event loop. */
typedef struct
{
    int sockfd;                    /* socket fd, or -1 once closed */
    int index;                     /* position in the ctx array */
    conn_stat state;
    size_t nsent;                  /* request bytes sent so far */
    size_t nrecv;                  /* response bytes received so far */
    size_t nlen;                   /* total request length to send */
    bool error;                    /* HTTP status line was not 200 */
    bool success;                  /* HTTP status line was 200 */
    struct sockaddr_in serv_addr;  /* server address used by connect() */
} socket_ctx;
/*
 * Put `sockfd` into non-blocking mode.
 * Returns 0 on success, -1 on failure (message printed to stdout).
 */
int set_nonblocking(int sockfd)
{
    int flags;
    int ret;

    /* Bug fix: check F_GETFL separately.  On failure it returns -1, and
     * OR-ing -1 into F_SETFL would try to set every flag bit instead of
     * reporting the error. */
    flags = fcntl(sockfd, F_GETFL);
    if (flags == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return -1;
    }
    ret = fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }
    return ret;
}
/*
 * Create a non-blocking TCP socket and fill pctx->serv_addr for ip:port.
 * Returns the socket fd (also stored in pctx->sockfd) on success, -1 on
 * failure.  Bug fix: on failures after socket() succeeded, the fd is now
 * closed and pctx->sockfd reset to -1 instead of being leaked.
 */
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);
    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        close(pctx->sockfd);    /* was leaked here */
        pctx->sockfd = -1;
        return -1;
    }

    return pctx->sockfd;
}
/*
 * Close the first cnt sockets of the pctx array, marking each closed slot
 * with -1 so it is not closed again.  A NULL array is a no-op.
 */
void close_sockets(socket_ctx *pctx, int cnt)
{
    if (pctx == NULL) {
        return;
    }
    for (int idx = 0; idx < cnt; idx++) {
        socket_ctx *cur = &pctx[idx];
        if (cur->sockfd > 0) {
            close(cur->sockfd);
            cur->sockfd = -1;
        }
    }
}
/*
 * Check the result of a deferred non-blocking connect() via SO_ERROR.
 * Returns 0 when the connection completed successfully, -1 when it failed.
 *
 * Cleanup is deliberately left to the caller: both call sites already run
 * del_event()/close() on failure, but previously this function closed the
 * fd and set ctx->sockfd = -1 first, so the callers then deregistered and
 * closed descriptor -1.
 */
int proc_pending_error(socket_ctx *ctx)
{
    int ret;
    int err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }
    err = 0;
    len = sizeof(int);
    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        /* getsockopt itself failed; report that errno instead */
        err = errno;
    }
    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);
        return -1;
    }
    return 0;
}
/*
 * Compose a minimal HTTP/1.1 POST request for `sql` aimed at ip:port/url
 * into req_buf (capacity len): request line, Host header, hard-coded Basic
 * auth for root:taosdata, Content-Type, Content-Length, blank line, body.
 * Invalid arguments leave req_buf untouched.
 */
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }
    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    /* strlen() returns size_t, so %zu (the old %ld was a format mismatch) */
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %zu\r\n\r\n", strlen(sql));
    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}
/*
 * Register sockfd with the epoll instance using the given event mask and
 * user-data pointer.  Returns the epoll_ctl() result (0 on success).
 */
int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
}
/*
 * Change the event mask and user-data pointer of an fd already registered
 * with epoll.  Returns the epoll_ctl() result (0 on success).
 */
int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event ev;

    ev.events = events;
    ev.data.ptr = data;
    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/*
 * Remove sockfd from the epoll set.  The event argument is ignored by
 * EPOLL_CTL_DEL on modern kernels but must be non-NULL for portability to
 * Linux before 2.6.9 (per the epoll_ctl man page), hence the dummy struct.
 */
int del_event(int epfd, int sockfd)
{
    struct epoll_event unused;

    unused.events = 0;
    unused.data.ptr = NULL;
    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &unused);
}
/*
 * Stress driver: open REQ_CLI_COUNT non-blocking connections to the local
 * REST server (127.0.0.1:6041), send one "use dbN" POST per connection and
 * verify every response carries a "200" status line.  A single epoll loop
 * multiplexes the connect / send / receive state machine; `count` tracks
 * live connections and the loop ends once all are finished.
 *
 * Fix vs. the original: the receive loop always passed RECV_MAX_LINE as the
 * recv() length even when pctx->nrecv bytes were already buffered, so a
 * response delivered across several events could overflow recv_buf; the
 * length is now bounded by the remaining capacity (minus one byte kept as a
 * NUL terminator for the %s diagnostic printf).
 */
int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd;
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url = "/rest/sql";
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    /* a peer reset while sending must not kill the whole process */
    signal(SIGPIPE, SIG_IGN);

    /* prepare one pre-built HTTP request per client */
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);
        snprintf(sql, REQ_MAX_LINE, "use db%d", i);
        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    /* kick off all non-blocking connects; EINPROGRESS is the normal case */
    count = 0;
    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }
            continue;
        }
        ctx[i].state = connected;
        count++;
    }
    printf("clients: %d\r\n", count);

    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* drain the socket (edge-triggered: read until EAGAIN) */
                    for ( ;; ) {
                        /* remaining capacity, reserving one byte so the
                         * buffer stays NUL-terminated for the %s below */
                        size_t cap = RECV_MAX_LINE - 1 - pctx->nrecv;
                        if (cap == 0) {
                            printf("response too large, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, cap, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }
                        pctx->nrecv += nrecv;
                        /* enough bytes for "HTTP/1.1 xxx": check the status
                         * code at offset 9 exactly once per connection */
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            continue;
                        }
                    }
                    /* push out the remainder of the request */
                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }
                            break;
                        }
                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
                            pctx->state = datasent;
                            /* request complete: only wait for readability */
                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }
failed:
    if (epfd > 0) {
        close(epfd);
    }
    close_sockets(ctx, REQ_CLI_COUNT);
    return 0;
}

View File

@ -102,6 +102,20 @@ class TDTestCase:
print("check2: i=%d colIdx=%d" % (i, colIdx)) print("check2: i=%d colIdx=%d" % (i, colIdx))
tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3))
def alter_table_255_times(self): # add case for TD-6207
for i in range(255):
tdLog.info("alter table st add column cb%d int"%i)
tdSql.execute("alter table st add column cb%d int"%i)
tdSql.execute("insert into t0 (ts,c1) values(now,1)")
tdSql.execute("reset query cache")
tdSql.query("select * from st")
tdSql.execute("create table mt(ts timestamp, i int)")
tdSql.execute("insert into mt values(now,11)")
tdSql.query("select * from mt")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("describe db.st")
def run(self): def run(self):
# Setup params # Setup params
db = "db" db = "db"
@ -131,12 +145,14 @@ class TDTestCase:
tdSql.checkData(0, i, self.rowNum * (size - i)) tdSql.checkData(0, i, self.rowNum * (size - i))
tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)")
tdSql.execute("create table t0 using st tags(null)") tdSql.execute("create table t0 using st tags(null,1,2.3)")
tdSql.execute("alter table t0 set tag t1=2.1") tdSql.execute("alter table t0 set tag t1=2.1")
tdSql.query("show tables") tdSql.query("show tables")
tdSql.checkRows(2) tdSql.checkRows(2)
self.alter_table_255_times()
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -175,12 +175,62 @@ class ConcurrentInquiry:
def con_group(self,tlist,col_list,tag_list): def con_group(self,tlist,col_list,tag_list):
rand_tag = random.randint(0,5) rand_tag = random.randint(0,5)
rand_col = random.randint(0,1) rand_col = random.randint(0,1)
if len(tag_list):
return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
else:
return 'group by '+','.join(random.sample(col_list,rand_col))
def con_order(self,tlist,col_list,tag_list): def con_order(self,tlist,col_list,tag_list):
return 'order by '+random.choice(tlist) return 'order by '+random.choice(tlist)
def gen_query_sql(self): #生成查询语句 def gen_subquery_sql(self):
subsql ,col_num = self.gen_query_sql(1)
if col_num == 0:
return 0
col_list=[]
tag_list=[]
for i in range(col_num):
col_list.append("taosd%d"%i)
tlist=col_list+['abc'] #增加不存在的域'abc'是否会引起新bug
con_rand=random.randint(0,len(condition_list))
func_rand=random.randint(0,len(func_list))
col_rand=random.randint(0,len(col_list))
t_rand=random.randint(0,len(tlist))
sql='select ' #select
random.shuffle(col_list)
random.shuffle(func_list)
sel_col_list=[]
col_rand=random.randint(0,len(col_list))
loop = 0
for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
alias = ' as '+ 'sub%d ' % loop
loop += 1
pick_func = ''
if j == 'leastsquares':
pick_func=j+'('+i+',1,1)'
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
pick_func=j+'('+i+',1)'
else:
pick_func=j+'('+i+')'
if bool(random.getrandbits(1)) :
pick_func+=alias
sel_col_list.append(pick_func)
if col_rand == 0:
sql = sql + '*'
else:
sql=sql+','.join(sel_col_list) #select col & func
sql = sql + ' from ('+ subsql +') '
con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill]
sel_con=random.sample(con_func,random.randint(0,len(con_func)))
sel_con_list=[]
for i in sel_con:
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
sql+=' '.join(sel_con_list) # condition
#print(sql)
return sql
def gen_query_sql(self,subquery=0): #生成查询语句
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表 tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
tbname='' tbname=''
col_list=[] col_list=[]
@ -218,10 +268,10 @@ class ConcurrentInquiry:
pick_func=j+'('+i+',1)' pick_func=j+'('+i+',1)'
else: else:
pick_func=j+'('+i+')' pick_func=j+'('+i+')'
if bool(random.getrandbits(1)): if bool(random.getrandbits(1)) | subquery :
pick_func+=alias pick_func+=alias
sel_col_list.append(pick_func) sel_col_list.append(pick_func)
if col_rand == 0: if col_rand == 0 & subquery :
sql = sql + '*' sql = sql + '*'
else: else:
sql=sql+','.join(sel_col_list) #select col & func sql=sql+','.join(sel_col_list) #select col & func
@ -238,7 +288,7 @@ class ConcurrentInquiry:
sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
sql+=' '.join(sel_con_list) # condition sql+=' '.join(sel_con_list) # condition
#print(sql) #print(sql)
return sql return (sql,loop)
def gen_query_join(self): #生成join查询语句 def gen_query_join(self): #生成join查询语句
tbname = [] tbname = []
@ -429,9 +479,12 @@ class ConcurrentInquiry:
try: try:
if self.random_pick(): if self.random_pick():
sql=self.gen_query_sql() if self.random_pick():
sql,temp=self.gen_query_sql()
else: else:
sql=self.gen_query_join() sql = self.gen_subquery_sql()
else:
sql = self.gen_query_join()
print("sql is ",sql) print("sql is ",sql)
fo.write(sql+'\n') fo.write(sql+'\n')
start = time.time() start = time.time()
@ -496,9 +549,12 @@ class ConcurrentInquiry:
while loop: while loop:
try: try:
if self.random_pick(): if self.random_pick():
sql=self.gen_query_sql() if self.random_pick():
sql,temp=self.gen_query_sql()
else: else:
sql=self.gen_query_join() sql = self.gen_subquery_sql()
else:
sql = self.gen_query_join()
print("sql is ",sql) print("sql is ",sql)
fo.write(sql+'\n') fo.write(sql+'\n')
start = time.time() start = time.time()

View File

@ -80,6 +80,7 @@ python3 ./test.py -f tag_lite/set.py
python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/smallint.py
python3 ./test.py -f tag_lite/tinyint.py python3 ./test.py -f tag_lite/tinyint.py
python3 ./test.py -f tag_lite/timestamp.py python3 ./test.py -f tag_lite/timestamp.py
python3 ./test.py -f tag_lite/TestModifyTag.py
#python3 ./test.py -f dbmgmt/database-name-boundary.py #python3 ./test.py -f dbmgmt/database-name-boundary.py
python3 test.py -f dbmgmt/nanoSecondCheck.py python3 test.py -f dbmgmt/nanoSecondCheck.py
@ -382,6 +383,8 @@ python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/flushwhiledrop.py
python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f query/queryWildcardLength.py
python3 ./test.py -f query/queryTbnameUpperLower.py
#======================p4-end=============== #======================p4-end===============

Some files were not shown because too many files have changed in this diff Show More