Merge branch 'develop' into feature/szhou/tagregex
commit 85ddd76db2
@@ -23,6 +23,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: test_arm64_bionic

@@ -150,6 +151,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_trusty

@@ -176,6 +178,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_xenial

@@ -201,7 +204,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_bionic

@@ -226,6 +229,7 @@ steps:
branch:
- develop
- master
- 2.0
---
kind: pipeline
name: build_centos7

@@ -249,4 +253,4 @@ steps:
branch:
- develop
- master
- 2.0
@@ -265,12 +265,12 @@ pipeline {
}
}
timeout(time: 60, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
./handle_crash_gen_val_log.sh
'''
// sh '''
// cd ${WKC}/tests/pytest
// rm -rf /var/lib/taos/*
// rm -rf /var/log/taos/*
// ./handle_crash_gen_val_log.sh
// '''
sh '''
cd ${WKC}/tests/pytest
rm -rf /var/lib/taos/*
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
  SET(TD_VER_NUMBER "2.1.6.0")
  SET(TD_VER_NUMBER "2.1.7.2")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef
char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) {
  int nBytes;
  char *pBuf;
  char *pBuf1;
  nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */
  pBuf = (char *)malloc(nBytes);
  if (!pBuf) {
@@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault
    free(pBuf);
    return NULL;
  }
  pBuf = realloc(pBuf, nBytes+1);
  return pBuf;
  pBuf1 = realloc(pBuf, nBytes+1);
  if(pBuf1 == NULL && pBuf != NULL) free(pBuf);
  return pBuf1;
}

int CountCharacters(const char *string, UINT cp) {
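The hunk above replaces the self-assigning `realloc` with a write to a temporary pointer, so the original buffer can still be freed when `realloc` returns NULL. A minimal standalone sketch of that idiom (the helper name and sizes are illustrative, not part of the patch):

```c
#include <stdlib.h>
#include <string.h>

/* Resize a heap buffer without leaking it on failure: on success the new
 * pointer is returned, on failure the old block is freed and NULL returned. */
static char *resize_or_free(char *buf, size_t new_size) {
  char *tmp = realloc(buf, new_size);  /* may fail and leave buf untouched */
  if (tmp == NULL && buf != NULL) {
    free(buf);                         /* avoid the leak the old code had */
    return NULL;
  }
  return tmp;
}

int main(void) {
  char *buf = malloc(64);
  if (!buf) return 1;
  strcpy(buf, "hello");
  buf = resize_or_free(buf, strlen(buf) + 1);  /* shrink to fit, as the patch does */
  int ok = (buf != NULL);
  free(buf);
  return ok ? 0 : 1;
}
```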
@@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
  int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */
  int nBackslash = 0;
  char **ppszArg;
  char **ppszArg1;
  int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */

  ppszArg = (char **)malloc((argc+1)*sizeof(char *));
@@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) {
    if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */
      iArg = TRUE;
      ppszArg[argc++] = pszCopy+j;
      ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
      ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *));
      if(ppszArg1 == NULL && ppszArg != NULL)
        free(ppszArg);
      ppszArg = ppszArg1;
      if (!ppszArg) return -1;
      pszCopy[j] = c0 = '\0';
    }
@@ -212,7 +216,7 @@ int _initU(void) {
    fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n");
    _acmdln[0] = '\0';
  }
  realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
  //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */
  /* Should not fail since we make it smaller */

  /* Record the console code page, to allow converting the output accordingly */
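`BreakArgLine` applies the same guard to its growing `char **` argument array. A hedged sketch of that shape in isolation (the `push_arg` helper and the argument values are illustrative):

```c
#include <stdlib.h>

/* Append a string to a NULL-terminated, heap-grown argument vector.
 * Returns 0 on success; on failure the old array is freed and -1 returned. */
static int push_arg(char ***pargv, int *pargc, char *value) {
  char **grown = realloc(*pargv, (*pargc + 2) * sizeof(char *));
  if (grown == NULL) {
    free(*pargv);          /* the old array is still valid here; release it */
    *pargv = NULL;
    return -1;
  }
  grown[(*pargc)++] = value;
  grown[*pargc] = NULL;    /* keep it argv-style NULL terminated */
  *pargv = grown;
  return 0;
}

int main(void) {
  char **args = NULL;
  int nargs = 0;
  if (push_arg(&args, &nargs, "taosd") != 0) return 1;
  if (push_arg(&args, &nargs, "-c") != 0) return 1;
  free(args);
  return 0;
}
```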
@@ -196,6 +196,7 @@ not_compact_enough:
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpath(const char *path, char *outbuf) {
  char *pOutbuf = outbuf;
  char *pOutbuf1 = NULL;
  int iErr;
  const char *pc;

@@ -242,8 +243,11 @@ realpath_failed:
    return NULL;
  }

  if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
  return pOutbuf;
  if (!outbuf) {
    pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
    if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
  }
  return pOutbuf1;
}

#endif

@@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) {
/* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */
char *realpathU(const char *path, char *outbuf) {
  char *pOutbuf = outbuf;
  char *pOutbuf1 = NULL;
  char *pPath1 = NULL;
  char *pPath2 = NULL;
  int iErr;

@@ -590,10 +595,13 @@ realpathU_failed:
  }

  DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf));
  if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1);
  if (!outbuf) {
    pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1);
    if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf);
  }
  free(pPath1);
  free(pPath2);
  return pOutbuf;
  return pOutbuf1;
}

#endif /* defined(_WIN32) */
@ -2,18 +2,18 @@
|
|||
|
||||
## <a class="anchor" id="intro"></a>TDengine 简介
|
||||
|
||||
TDengine是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
|
||||
TDengine 是涛思数据面对高速增长的物联网大数据市场和技术挑战推出的创新性的大数据处理产品,它不依赖任何第三方软件,也不是优化或包装了一个开源的数据库或流式计算产品,而是在吸取众多传统关系型数据库、NoSQL 数据库、流式计算引擎、消息队列等软件的优点之后自主开发的产品,在时序空间大数据处理上,有着自己独到的优势。
|
||||
|
||||
TDengine的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与Hadoop等典型的大数据平台相比,它具有如下鲜明的特点:
|
||||
TDengine 的模块之一是时序数据库。但除此之外,为减少研发的复杂度、系统维护的难度,TDengine 还提供缓存、消息队列、订阅、流式计算等功能,为物联网、工业互联网大数据的处理提供全栈的技术方案,是一个高效易用的物联网大数据平台。与 Hadoop 等典型的大数据平台相比,它具有如下鲜明的特点:
|
||||
|
||||
* __10倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少2万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
|
||||
* __硬件或云服务成本降至1/5__:由于超强性能,计算资源不到通用大数据方案的1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的1/10。
|
||||
* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。
|
||||
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, MATLAB随时进行。
|
||||
* __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R等集成。后续将支持OPC, Hadoop, Spark等,BI工具也将无缝连接。
|
||||
* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准SQL,支持RESTful,支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。
|
||||
* __10 倍以上的性能提升__:定义了创新的数据存储结构,单核每秒能处理至少 2 万次请求,插入数百万个数据点,读出一千万以上数据点,比现有通用数据库快十倍以上。
|
||||
* __硬件或云服务成本降至 1/5__:由于超强性能,计算资源不到通用大数据方案的 1/5;通过列式存储和先进的压缩算法,存储空间不到通用数据库的 1/10。
|
||||
* __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成 Kafka/Redis/HBase/Spark/HDFS 等软件,大幅降低应用开发和维护的复杂度成本。
|
||||
* __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过 Shell, Python, R, MATLAB 随时进行。
|
||||
* __与第三方工具无缝连接__:不用一行代码,即可与 Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R 等集成。后续将支持 OPC, Hadoop, Spark 等,BI 工具也将无缝连接。
|
||||
* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类标准 SQL,支持 RESTful,支持 Python/Java/C/C++/C#/Go/Node.js, 与 MySQL 相似,零学习成本。
|
||||
|
||||
采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。
|
||||
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。
|
||||
|
||||

|
||||
<center>图 1. TDengine技术生态图</center>
|
||||
|
@ -21,42 +21,47 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的
|
|||
|
||||
## <a class="anchor" id="scenes"></a>TDengine 总体适用场景
|
||||
|
||||
作为一个IOT大数据平台,TDengine的典型适用场景是在IOT范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如CRM,ERP等,不在本文讨论范围内。
|
||||
作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。
|
||||
|
||||
|
||||
### 数据源特点和需求
|
||||
从数据源角度,设计人员可以从下面几个角度分析TDengine在目标应用系统里面的适用性。
|
||||
|
||||
从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
|
||||
|
||||
|数据源特点和需求|不适用|可能适用|非常适用|简单说明|
|
||||
|---|---|---|---|---|
|
||||
|总体数据量巨大| | | √ |TDengine在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
|
||||
|数据输入速度偶尔或者持续巨大| | | √ | TDengine的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
|
||||
|数据源数目巨大| | | √ |TDengine设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
|
||||
|总体数据量巨大| | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。|
|
||||
|数据输入速度偶尔或者持续巨大| | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。|
|
||||
|数据源数目巨大| | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。|
|
||||
|
||||
### 系统架构要求
|
||||
|
||||
|系统架构要求|不适用|可能适用|非常适用|简单说明|
|
||||
|---|---|---|---|---|
|
||||
|要求简单可靠的系统架构| | | √ |TDengine的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
|
||||
|要求容错和高可靠| | | √ |TDengine的集群功能,自动提供容错灾备等高可靠功能。|
|
||||
|标准化规范| | | √ |TDengine使用标准的SQL语言提供主要功能,遵守标准化规范。|
|
||||
|要求简单可靠的系统架构| | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。|
|
||||
|要求容错和高可靠| | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。|
|
||||
|标准化规范| | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。|
|
||||
|
||||
### 系统功能需求
|
||||
|
||||
|系统功能需求|不适用|可能适用|非常适用|简单说明|
|
||||
|---|---|---|---|---|
|
||||
|要求完整的内置数据处理算法| | √ | |TDengine的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
|
||||
|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑TDengine和关系型数据系统配合实现系统功能。|
|
||||
|要求完整的内置数据处理算法| | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。|
|
||||
|需要大量的交叉查询处理| | √ | |这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。|
|
||||
|
||||
### 系统性能需求
|
||||
|
||||
|系统性能需求|不适用|可能适用|非常适用|简单说明|
|
||||
|---|---|---|---|---|
|
||||
|要求较大的总体处理能力| | | √ |TDengine的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
|
||||
|要求高速处理数据 | | | √ |TDengine的专门为IOT优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|
||||
|要求快速处理小粒度数据| | | √ |这方面TDengine性能可以完全对标关系型和NoSQL型数据处理系统。|
|
||||
|要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。|
|
||||
|要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。|
|
||||
|要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。|
|
||||
|
||||
### 系统维护需求
|
||||
|
||||
|系统维护需求|不适用|可能适用|非常适用|简单说明|
|
||||
|---|---|---|---|---|
|
||||
|要求系统可靠运行| | | √ |TDengine的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
|
||||
|要求系统可靠运行| | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。|
|
||||
|要求运维学习成本可控| | | √ |同上。|
|
||||
|要求市场有大量人才储备| √ | | |TDengine作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
|
||||
|要求市场有大量人才储备| √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。|
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# 通过 Docker 快速体验 TDengine
|
||||
|
||||
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。
|
||||
虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从2.0.14.0版本开始,TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台,像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。
|
||||
|
||||
下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。
|
||||
|
||||
|
@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c
|
|||
|
||||
```bash
|
||||
$ docker -v
|
||||
Docker version 20.10.5, build 55c4c88
|
||||
Docker version 20.10.3, build 48d30b5
|
||||
```
|
||||
|
||||
## 在 Docker 容器中运行 TDengine
|
||||
|
@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88
|
|||
1,使用命令拉取 TDengine 镜像,并使它在后台运行。
|
||||
|
||||
```bash
|
||||
$ docker run -d tdengine/tdengine
|
||||
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
|
||||
$ docker run -d --name tdengine tdengine/tdengine
|
||||
7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292
|
||||
```
|
||||
|
||||
- **docker run**:通过 Docker 运行一个容器。
|
||||
- **-d**:让容器在后台运行。
|
||||
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。
|
||||
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。
|
||||
- **docker run**:通过 Docker 运行一个容器
|
||||
- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器
|
||||
- **-d**:让容器在后台运行
|
||||
- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像
|
||||
- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器
|
||||
|
||||
2,确认容器是否已经正确运行。
|
||||
|
||||
```bash
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS ···
|
||||
cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
|
||||
c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ···
|
||||
```
|
||||
|
||||
- **docker ps**:列出所有正在运行状态的容器信息。
|
||||
|
@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ·
|
|||
3,进入 Docker 容器内,使用 TDengine。
|
||||
|
||||
```bash
|
||||
$ docker exec -it cdf548465318 /bin/bash
|
||||
root@cdf548465318:~/TDengine-server-2.0.13.0#
|
||||
$ docker exec -it tdengine /bin/bash
|
||||
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
|
||||
```
|
||||
|
||||
- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。
|
||||
- **-i**:进入交互模式。
|
||||
- **-t**:指定一个终端。
|
||||
- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
|
||||
- **c452519b0f9b**:容器 ID,需要根据 docker ps 指令返回的值进行修改。
|
||||
- **/bin/bash**:载入容器后运行 bash 来进行交互。
|
||||
|
||||
4,进入容器后,执行 taos shell 客户端程序。
|
||||
|
||||
```bash
|
||||
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
|
||||
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos>
|
||||
taos>
|
||||
```
|
||||
|
||||
TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。
|
||||
|
@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息
|
|||
|
||||
```bash
|
||||
$ taos> q
|
||||
root@cdf548465318:~/TDengine-server-2.0.13.0#
|
||||
root@c452519b0f9b:~/TDengine-server-2.0.20.13#
|
||||
```
|
||||
|
||||
2,在命令行界面执行 taosdemo。
|
||||
|
||||
```bash
|
||||
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
|
||||
###################################################################
|
||||
# Server IP: localhost:0
|
||||
# User: root
|
||||
# Password: taosdata
|
||||
# Use metric: true
|
||||
# Datatype of Columns: int int int int int int int float
|
||||
# Binary Length(If applicable): -1
|
||||
# Number of Columns per record: 3
|
||||
# Number of Threads: 10
|
||||
# Number of Tables: 10000
|
||||
# Number of Data per Table: 100000
|
||||
# Records/Request: 1000
|
||||
# Database name: test
|
||||
# Table prefix: t
|
||||
# Delete method: 0
|
||||
# Test time: 2021-04-13 02:05:20
|
||||
###################################################################
|
||||
root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo
|
||||
|
||||
taosdemo is simulating data generated by power equipments monitoring...
|
||||
|
||||
host: 127.0.0.1:6030
|
||||
user: root
|
||||
password: taosdata
|
||||
configDir:
|
||||
resultFile: ./output.txt
|
||||
thread num of insert data: 10
|
||||
thread num of create table: 10
|
||||
top insert interval: 0
|
||||
number of records per req: 30000
|
||||
max sql length: 1048576
|
||||
database count: 1
|
||||
database[0]:
|
||||
database[0] name: test
|
||||
drop: yes
|
||||
replica: 1
|
||||
precision: ms
|
||||
super table count: 1
|
||||
super table[0]:
|
||||
stbName: meters
|
||||
autoCreateTable: no
|
||||
childTblExists: no
|
||||
childTblCount: 10000
|
||||
childTblPrefix: d
|
||||
dataSource: rand
|
||||
iface: taosc
|
||||
insertRows: 10000
|
||||
interlaceRows: 0
|
||||
disorderRange: 1000
|
||||
disorderRatio: 0
|
||||
maxSqlLen: 1048576
|
||||
timeStampStep: 1
|
||||
startTimestamp: 2017-07-14 10:40:00.000
|
||||
sampleFormat:
|
||||
sampleFile:
|
||||
tagsFile:
|
||||
columnCount: 3
|
||||
column[0]:FLOAT column[1]:INT column[2]:FLOAT
|
||||
tagCount: 2
|
||||
tag[0]:INT tag[1]:BINARY(16)
|
||||
|
||||
Press enter key to continue or Ctrl-C to stop
|
||||
```
|
||||
|
||||
回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。
|
||||
回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
|
||||
|
||||
执行这条命令大概需要几分钟,最后共插入 1 亿条记录。
|
||||
|
||||
3,进入 TDengine 终端,查看 taosdemo 生成的数据。
|
||||
|
||||
- **进入命令行。**
|
||||
|
||||
```bash
|
||||
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos
|
||||
$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos
|
||||
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.20.13
|
||||
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
taos>
|
||||
taos>
|
||||
```
|
||||
|
||||
- **查看数据库。**
|
||||
|
@ -124,8 +154,8 @@ taos>
|
|||
```bash
|
||||
$ taos> show databases;
|
||||
name | created_time | ntables | vgroups | ···
|
||||
test | 2021-04-13 02:14:15.950 | 10000 | 6 | ···
|
||||
log | 2021-04-12 09:36:37.549 | 4 | 1 | ···
|
||||
test | 2021-08-18 06:01:11.021 | 10000 | 6 | ···
|
||||
log | 2021-08-18 05:51:51.065 | 4 | 1 | ···
|
||||
|
||||
```
|
||||
|
||||
|
@ -136,10 +166,10 @@ $ taos> use test;
|
|||
Database changed.
|
||||
|
||||
$ taos> show stables;
|
||||
name | created_time | columns | tags | tables |
|
||||
=====================================================================================
|
||||
meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 |
|
||||
Query OK, 1 row(s) in set (0.001737s)
|
||||
name | created_time | columns | tags | tables |
|
||||
============================================================================================
|
||||
meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 |
|
||||
Query OK, 1 row(s) in set (0.003259s)
|
||||
|
||||
```
|
||||
|
||||
|
@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s)
|
|||
|
||||
```bash
|
||||
$ taos> select * from test.t0 limit 10;
|
||||
ts | f1 | f2 | f3 |
|
||||
====================================================================
|
||||
2017-07-14 02:40:01.000 | 3 | 9 | 0 |
|
||||
2017-07-14 02:40:02.000 | 0 | 1 | 2 |
|
||||
2017-07-14 02:40:03.000 | 7 | 2 | 3 |
|
||||
2017-07-14 02:40:04.000 | 9 | 4 | 5 |
|
||||
2017-07-14 02:40:05.000 | 1 | 2 | 5 |
|
||||
2017-07-14 02:40:06.000 | 6 | 3 | 2 |
|
||||
2017-07-14 02:40:07.000 | 4 | 7 | 8 |
|
||||
2017-07-14 02:40:08.000 | 4 | 6 | 6 |
|
||||
2017-07-14 02:40:09.000 | 5 | 7 | 7 |
|
||||
2017-07-14 02:40:10.000 | 1 | 5 | 0 |
|
||||
Query OK, 10 row(s) in set (0.003638s)
|
||||
|
||||
DB error: Table does not exist (0.002857s)
|
||||
taos> select * from test.d0 limit 10;
|
||||
ts | current | voltage | phase |
|
||||
======================================================================================
|
||||
2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 |
|
||||
2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 |
|
||||
2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 |
|
||||
2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 |
|
||||
2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 |
|
||||
2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 |
|
||||
2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 |
|
||||
2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 |
|
||||
2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 |
|
||||
2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 |
|
||||
Query OK, 10 row(s) in set (0.016791s)
|
||||
|
||||
```
|
||||
|
||||
- **查看 t0 表的标签值。**
|
||||
- **查看 d0 表的标签值。**
|
||||
|
||||
```bash
|
||||
$ taos> select areaid, loc from test.t0;
|
||||
areaid | loc |
|
||||
===========================
|
||||
10 | shanghai |
|
||||
Query OK, 1 row(s) in set (0.002904s)
|
||||
$ taos> select groupid, location from test.d0;
|
||||
groupid | location |
|
||||
=================================
|
||||
0 | shanghai |
|
||||
Query OK, 1 row(s) in set (0.003490s)
|
||||
|
||||
```
|
||||
|
||||
## 停止正在 Docker 中运行的 TDengine 服务
|
||||
|
||||
```bash
|
||||
$ docker stop cdf548465318
|
||||
cdf548465318
|
||||
$ docker stop tdengine
|
||||
tdengine
|
||||
```
|
||||
|
||||
- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。
|
||||
- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。
|
||||
- **tdengine**:容器名称。
|
||||
|
||||
## 编程开发时连接在 Docker 中的 TDengine
|
||||
|
||||
|
@ -195,7 +228,7 @@ $ docker run -d -v /etc/taos:/etc/taos -P 6041:6041 tdengine/tdengine
|
|||
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
|
||||
|
||||
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
|
||||
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
|
||||
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2}
|
||||
```
|
||||
|
||||
- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。
|
||||
|
@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
|
|||
2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。
|
||||
|
||||
```bash
|
||||
$ docker exec -it 526aa188da /bin/bash
|
||||
$ docker exec -it tdengine /bin/bash
|
||||
```
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
# TDengine数据建模
|
||||
|
||||
TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
|
||||
TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
|
||||
|
||||
关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。
|
||||
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
## 总体介绍
|
||||
|
||||
TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。
|
||||
|
||||
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
|
||||
|
||||

|
||||
|
@ -14,12 +12,10 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实
|
|||
* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。
|
||||
* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。
|
||||
|
||||
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
|
||||
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
|
||||
|
||||
* TDengine 目前不支持针对单条数据记录的删除操作。
|
||||
* 目前不支持事务操作。
|
||||
* 目前不支持嵌套查询(nested query)。
|
||||
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
|
||||
|
||||
### JDBC-JNI和JDBC-RESTful的对比
|
||||
|
||||
|
@ -50,9 +46,12 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
|||
</tr>
|
||||
</table>
|
||||
|
||||
注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。
|
||||
注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。(从 TDengine 2.1.8.0 版本开始,也可以在 RESTful url 中指定当前 SQL 语句所使用的默认数据库名。)例如:
|
||||
```sql
|
||||
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
|
||||
```
|
||||
|
||||
### <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||
|
||||
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
||||
| -------------------- | ----------------- | -------- |
|
||||
|
@ -65,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
|||
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
|
||||
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
|
||||
|
||||
### TDengine DataType 和 Java DataType
|
||||
## TDengine DataType 和 Java DataType
|
||||
|
||||
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
|
||||
|
||||
|
@ -82,36 +81,29 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
|
|||
| BINARY | byte array |
|
||||
| NCHAR | java.lang.String |
|
||||
|
||||
## 安装
|
||||
## 安装Java Connector
|
||||
|
||||
Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。
|
||||
### 安装前准备
|
||||
|
||||
**安装前准备:**
|
||||
|
||||
- 已安装TDengine服务器端
|
||||
- 已安装好TDengine应用驱动,具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节
|
||||
|
||||
TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。
|
||||
|
||||
由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
使用Java Connector连接数据库前,需要具备以下条件:
|
||||
1. Linux或Windows操作系统
|
||||
2. Java 1.8以上运行时环境
|
||||
3. TDengine-client(使用JDBC-JNI时必须,使用JDBC-RESTful时非必须)
|
||||
|
||||
**注意**:由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
- libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
|
||||
- taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
|
||||
注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
|
||||
**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。
|
||||
|
||||
### 如何获取 TAOS-JDBCDriver
|
||||
|
||||
**maven仓库**
|
||||
### 通过maven获取JDBC driver
|
||||
|
||||
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。
|
||||
|
||||
- [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver)
|
||||
- [maven.aliyun](https://maven.aliyun.com/mvn/search)
|
||||
|
||||
maven 项目中使用如下 pom.xml 配置即可:
|
||||
maven 项目中,在pom.xml 中添加以下依赖:
|
||||
```xml-dtd
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
|
@ -119,32 +111,17 @@ maven 项目中使用如下 pom.xml 配置即可:
|
|||
<version>2.0.18</version>
|
||||
</dependency>
|
||||
```
|
||||
**源码编译打包**
|
||||
|
||||
下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
|
||||
### 通过源码编译获取JDBC driver
|
||||
|
||||
### 示例程序
|
||||
|
||||
示例程序源码位于install_directory/examples/JDBC,有如下目录:
|
||||
|
||||
JDBCDemo JDBC示例源程序
|
||||
|
||||
JDBCConnectorChecker JDBC安装校验源程序及jar包
|
||||
|
||||
Springbootdemo springboot示例源程序
|
||||
|
||||
SpringJdbcTemplate SpringJDBC模板
|
||||
|
||||
### 安装验证
|
||||
|
||||
运行如下指令:
|
||||
|
||||
```Bash
|
||||
cd {install_directory}/examples/JDBC/JDBCConnectorChecker
|
||||
java -jar JDBCConnectorChecker.jar -host <fqdn>
|
||||
可以通过下载TDengine的源码,自己编译最新版本的java connector
|
||||
```shell
|
||||
git clone https://github.com/taosdata/TDengine.git
|
||||
cd TDengine/src/connector/jdbc
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
验证通过将打印出成功信息。
|
||||
编译后,在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。
|
||||
|
||||
## Java连接器的使用
|
||||
|
||||
|
@ -163,13 +140,11 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
|
|||
以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
|
||||
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
|
||||
|
||||
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”;
|
||||
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
|
||||
3. 使用 6041 作为连接端口。
|
||||
|
||||
如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示:
|
||||
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
|
||||
|
@ -178,15 +153,9 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
|
|||
|
||||
以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。
|
||||
|
||||
**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。
|
||||
|
||||
* libtaos.so
|
||||
在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
|
||||
* taos.dll
|
||||
在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
|
||||
> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
|
||||
> 在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF) 连接远程 TDengine Server。
|
||||
|
||||
JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
|
||||
|
||||
|
@ -194,12 +163,15 @@ TDengine 的 JDBC URL 规范格式为:
|
|||
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
|
||||
|
||||
url中的配置参数如下:
|
||||
* user:登录 TDengine 用户名,默认值 root。
|
||||
* password:用户登录密码,默认值 taosdata。
|
||||
* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
|
||||
* user:登录 TDengine 用户名,默认值 'root'。
|
||||
* password:用户登录密码,默认值 'taosdata'。
|
||||
* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
* charset:客户端使用的字符集,默认值为系统字符集。
|
||||
* locale:客户端语言环境,默认值系统当前 locale。
|
||||
* timezone:客户端使用的时区,默认值为系统当前时区。
|
||||
* batchfetch: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
|
||||
* timestampFormat: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
|
||||
* batchErrorIgnore:true:在执行Statement的executeBatch时,如果中间有一条 SQL 执行失败,将继续执行下面的 SQL;false:不再执行失败 SQL 之后的任何语句。默认值为:false。
|
||||
|
||||
#### 指定URL和Properties获取连接
|
||||
|
||||
|
@ -222,12 +194,15 @@ public Connection getConn() throws Exception{
|
|||
以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
|
||||
|
||||
properties 中的配置参数如下:
|
||||
* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。
|
||||
* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。
|
||||
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
|
||||
* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 'root'。
|
||||
* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 'taosdata'。
|
||||
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 `/etc/taos`,Windows OS 上默认值 `C:/TDengine/cfg`。
|
||||
* TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。
|
||||
* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。
|
||||
* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。
|
||||
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。
|
||||
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。
|
||||
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行Statement的executeBatch时,如果中间有一条 SQL 执行失败,将继续执行下面的 SQL;false:不再执行失败 SQL 之后的任何语句。默认值为:false。
|
||||
|
||||
#### 使用客户端配置文件建立连接
|
||||
|
||||
|
@ -265,6 +240,7 @@ secondEp cluster_node2:6030
|
|||
```
|
||||
|
||||
以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。
|
||||
|
||||
TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。
|
||||
|
||||
> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
|
||||
|
@ -348,6 +324,7 @@ try (Statement statement = connection.createStatement()) {
|
|||
```
|
||||
|
||||
JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。
|
||||
|
||||
具体的错误码请参考:
|
||||
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
|
||||
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
|
||||
|
@ -428,11 +405,12 @@ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
|
|||
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。
|
||||
|
||||
### <a class="anchor" id="subscribe"></a>订阅
|
||||
## <a class="anchor" id="subscribe"></a>订阅
|
||||
|
||||
#### 创建
|
||||
### 创建
|
||||
|
||||
```java
|
||||
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
|
||||
|
@ -446,7 +424,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met
|
|||
|
||||
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
|
||||
|
||||
#### 消费数据
|
||||
### 消费数据
|
||||
|
||||
```java
|
||||
int total = 0;
|
||||
|
@ -464,7 +442,7 @@ while(true) {
|
|||
|
||||
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
|
||||
|
||||
#### 关闭订阅
|
||||
### 关闭订阅
|
||||
|
||||
```java
|
||||
sub.close(true);
|
||||
|
@ -472,7 +450,7 @@ sub.close(true);
|
|||
|
||||
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
|
||||
|
||||
### 关闭资源
|
||||
## 关闭资源
|
||||
|
||||
```java
|
||||
resultSet.close();
|
||||
|
@ -484,19 +462,9 @@ conn.close();
|
|||
|
||||
## 与连接池使用
|
||||
|
||||
**HikariCP**
|
||||
### HikariCP
|
||||
|
||||
* 引入相应 HikariCP maven 依赖:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.zaxxer</groupId>
|
||||
<artifactId>HikariCP</artifactId>
|
||||
<version>3.4.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
使用示例如下:
|
||||
|
||||
```java
|
||||
public static void main(String[] args) throws SQLException {
|
||||
|
@ -528,19 +496,9 @@ conn.close();
|
|||
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
|
||||
> 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。
|
||||
|
||||
**Druid**
|
||||
### Druid
|
||||
|
||||
* 引入相应 Druid maven 依赖:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>druid</artifactId>
|
||||
<version>1.1.20</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
使用示例如下:
|
||||
|
||||
```java
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
@ -586,6 +544,16 @@ Query OK, 1 row(s) in set (0.000141s)
|
|||
* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate)
|
||||
* Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo)
|
||||
|
||||
## 示例程序
|
||||
|
||||
示例程序源码位于TDengine/test/examples/JDBC下:
|
||||
* JDBCDemo:JDBC示例源程序
|
||||
* JDBCConnectorChecker:JDBC安装校验源程序及jar包
|
||||
* Springbootdemo:springboot示例源程序
|
||||
* SpringJdbcTemplate:SpringJDBC模板
|
||||
|
||||
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
|
||||
|
||||
## 常见问题
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
|
|
|
@ -315,10 +315,6 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
|
|||
1. 调用 `taos_stmt_init` 创建参数绑定对象;
|
||||
2. 调用 `taos_stmt_prepare` 解析 INSERT 语句;
|
||||
3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名;
|
||||
* 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下:
|
||||
1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta;
|
||||
2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名;
|
||||
3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。
|
||||
4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值;
|
||||
5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值;
|
||||
6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理;
|
||||
|
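The binding workflow in the list above maps onto a short C call sequence. A hedged sketch (the `TAOS *` connection, table `test.d0`, and the two-column layout are illustrative assumptions; most error handling is omitted):

```c
#include <taos.h>
#include <stdio.h>
#include <string.h>

/* Insert one row via the prepared-statement API described above. */
static int stmt_insert_one_row(TAOS *taos) {
  const char *sql = "INSERT INTO ? VALUES(?, ?)";
  TAOS_STMT *stmt = taos_stmt_init(taos);                    /* 1. create the statement object */
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));  /* 2. parse the INSERT */
  taos_stmt_set_tbname(stmt, "test.d0");                     /* 3. bind the table-name placeholder */

  int64_t ts = 1626919200000LL;                              /* illustrative timestamp in ms */
  float   current = 10.3f;
  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].length        = &params[0].buffer_length;
  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = &current;
  params[1].buffer_length = sizeof(current);
  params[1].length        = &params[1].buffer_length;

  taos_stmt_bind_param(stmt, params);                        /* 5. bind one row of VALUES */
  taos_stmt_add_batch(stmt);                                 /* 6. add the bound row to the batch */
  int code = taos_stmt_execute(stmt);
  if (code != 0) fprintf(stderr, "taos_stmt_execute failed, code %d\n", code);
  taos_stmt_close(stmt);
  return code;
}
```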
@ -362,12 +358,6 @@ typedef struct TAOS_BIND {
|
|||
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
|
||||
|
||||
- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)`
|
||||
|
||||
(2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名)
|
||||
当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。
|
||||
*注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta,然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`。
|
||||
|
||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
|
||||
|
||||
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||
|
@ -664,22 +654,23 @@ conn.close()
|
|||
|
||||
为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
|
||||
|
||||
注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。
|
||||
注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。(从 2.1.8.0 版本开始,支持在 RESTful url 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 url 中指定的这个 db_name。)
|
||||
|
||||
### 安装
|
||||
|
||||
RESTful接口不依赖于任何TDengine的库,因此客户端不需要安装任何TDengine的库,只要客户端的开发语言支持HTTP协议即可。
|
||||
RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。
|
||||
|
||||
### 验证
|
||||
|
||||
在已经安装TDengine服务器端的情况下,可以按照如下方式进行验证。
|
||||
在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。
|
||||
|
||||
下面以Ubuntu环境中使用curl工具(确认已经安装)来验证RESTful接口的正常。
|
||||
下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常。
|
||||
|
||||
下面示例是列出所有的数据库,请把h1.taosdata.com和6041(缺省值)替换为实际运行的TDengine服务fqdn和端口号:
|
||||
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 fqdn 和端口号:
|
||||
```html
|
||||
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
|
||||
```
|
||||
|
||||
返回值结果如下表示验证通过:
|
||||
```json
|
||||
{
|
||||
|
@ -692,22 +683,23 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taos
|
|||
}
|
||||
```
|
||||
|
||||
### RESTful连接器的使用
|
||||
### RESTful 连接器的使用
|
||||
|
||||
#### HTTP请求格式
|
||||
#### HTTP 请求格式
|
||||
|
||||
```
|
||||
http://<fqdn>:<port>/rest/sql
|
||||
http://<fqdn>:<port>/rest/sql/[db_name]
|
||||
```
|
||||
|
||||
参数说明:
|
||||
|
||||
- fqnd: 集群中的任一台主机FQDN或IP地址
|
||||
- port: 配置文件中httpPort配置项,缺省为6041
|
||||
- fqnd: 集群中的任一台主机 FQDN 或 IP 地址
|
||||
- port: 配置文件中 httpPort 配置项,缺省为 6041
|
||||
- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.1.8.0 版本开始支持)
|
||||
|
||||
例如:http://h1.taos.com:6041/rest/sql 是指向地址为h1.taos.com:6041的url。
|
||||
例如:http://h1.taos.com:6041/rest/sql/test 是指向地址为 h1.taos.com:6041 的 url,并将默认使用的数据库库名设置为 test。
|
||||
|
||||
HTTP请求的Header里需带有身份认证信息,TDengine支持Basic认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
|
||||
HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
|
||||
|
||||
- 自定义身份认证信息如下所示(<token>稍后介绍)
|
||||
|
||||
|
@ -721,25 +713,25 @@ Authorization: Taosd <TOKEN>
|
|||
Authorization: Basic <TOKEN>
|
||||
```
|
||||
|
||||
HTTP请求的BODY里就是一个完整的SQL语句,SQL语句中的数据表应提供数据库前缀,例如\<db-name>.\<tb-name>。如果表名不带数据库前缀,系统会返回错误。因为HTTP模块只是一个简单的转发,没有当前DB的概念。
|
||||
HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据表应提供数据库前缀,例如 \<db_name>.\<tb_name>。如果表名不带数据库前缀,又没有在 url 中指定数据库名的话,系统会返回错误。因为 HTTP 模块只是一个简单的转发,没有当前 DB 的概念。
|
||||
|
||||
使用curl通过自定义身份认证方式来发起一个HTTP Request,语法如下:
|
||||
使用 curl 通过自定义身份认证方式来发起一个 HTTP Request,语法如下:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql
|
||||
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
```bash
|
||||
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
|
||||
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
|
||||
```
|
||||
|
||||
其中,`TOKEN`为`{username}:{password}`经过Base64编码之后的字符串,例如`root:taosdata`编码后为`cm9vdDp0YW9zZGF0YQ==`
|
||||
其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
|
||||
|
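The same request can also be issued from C. A hedged sketch using libcurl (libcurl is an assumption of this example rather than a TDengine requirement; host, port, and credentials are the defaults used above):

```c
#include <curl/curl.h>
#include <stdio.h>

/* POST one SQL statement to the RESTful endpoint with HTTP Basic auth,
 * equivalent to: curl -u root:taosdata -d 'show databases;' 127.0.0.1:6041/rest/sql */
int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (!curl) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");   /* libcurl builds the Basic token */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases;");

  CURLcode rc = curl_easy_perform(curl);   /* response JSON is written to stdout by default */
  if (rc != CURLE_OK) fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return (rc == CURLE_OK) ? 0 : 1;
}
```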
||||
### HTTP返回格式
|
||||
### HTTP 返回格式
|
||||
|
||||
返回值为JSON格式,如下:
|
||||
返回值为 JSON 格式,如下:
|
||||
|
||||
```json
|
||||
{
|
||||
|
@ -757,9 +749,9 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
|
|||
说明:
|
||||
|
||||
- status: 告知操作结果是成功还是失败。
|
||||
- head: 表的定义,如果不返回结果集,则仅有一列“affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中,有可能会从返回值中去掉 head 这一项。)
|
||||
- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在未来版本中,有可能会从返回值中去掉 head 这一项。)
|
||||
- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
|
||||
- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有[[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
|
||||
- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
|
||||
- rows: 表明总共多少行数据。
|
||||
|
||||
column_meta 中的列类型说明:
|
||||
|
@ -776,13 +768,13 @@ column_meta 中的列类型说明:
|
|||
|
||||
### 自定义授权码
|
||||
|
||||
HTTP请求中需要带有授权码`<TOKEN>`,用于身份识别。授权码通常由管理员提供,可简单的通过发送`HTTP GET`请求来获取授权码,操作如下:
|
||||
HTTP 请求中需要带有授权码 `<TOKEN>`,用于身份识别。授权码通常由管理员提供,可简单的通过发送 `HTTP GET` 请求来获取授权码,操作如下:
|
||||
|
||||
```bash
|
||||
curl http://<fqnd>:<port>/rest/login/<username>/<password>
|
||||
```
|
||||
|
||||
其中,`fqdn`是TDengine数据库的fqdn或ip地址,port是TDengine服务的端口号,`username`为数据库用户名,`password`为数据库密码,返回值为`JSON`格式,各字段含义如下:
|
||||
其中,`fqdn` 是 TDengine 数据库的 fqdn 或 ip 地址,port 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 `JSON` 格式,各字段含义如下:
|
||||
|
||||
- status:请求结果的标志位
|
||||
|
||||
|
@ -808,7 +800,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
|
|||
|
||||
### 使用示例
|
||||
|
||||
- 在demo库里查询表d1001的所有记录:
|
||||
- 在 demo 库里查询表 d1001 的所有记录:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
|
||||
|
@ -828,7 +820,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
|
|||
}
|
||||
```
|
||||
|
||||
- 创建库demo:
|
||||
- 创建库 demo:
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
|
||||
|
@ -847,9 +839,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19
|
|||
|
||||
### 其他用法
|
||||
|
||||
#### 结果集采用Unix时间戳
|
||||
#### 结果集采用 Unix 时间戳
|
||||
|
||||
HTTP请求URL采用`sqlt`时,返回结果集的时间戳将采用Unix时间戳格式表示,例如
|
||||
HTTP 请求 URL 采用 `sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如
|
||||
|
||||
```bash
|
||||
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
|
||||
|
@ -870,9 +862,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
|
|||
}
|
||||
```
|
||||
|
||||
#### 结果集采用UTC时间字符串
|
||||
#### 结果集采用 UTC 时间字符串
|
||||
|
||||
HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间字符串表示,例如
|
||||
HTTP 请求 URL 采用 `sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如
|
||||
```bash
|
||||
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
|
||||
```
|
||||
|
@ -894,13 +886,14 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间
|
|||
|
||||
### 重要配置项
|
||||
|
||||
下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。(注意:配置修改后,需要重启taosd服务才能生效)
|
||||
下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。(注意:配置修改后,需要重启 taosd 服务才能生效)
|
||||
|
||||
- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)
|
||||
- httpMaxThreads: 启动的线程数量,默认为2(2.0.17.0版本开始,默认值改为CPU核数的一半向下取整)
|
||||
- restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240
|
||||
- httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式
|
||||
- httpDebugFlag: 日志开关,默认131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认131
|
||||
- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。
|
||||
- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。
|
||||
- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。
|
||||
- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。
|
||||
- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息,默认 131。
|
||||
- httpDbNameMandatory: 是否必须在 RESTful url 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful url 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。
|
||||
|
||||
## <a class="anchor" id="csharp"></a>CSharp Connector
|
||||
|
||||
|
@ -976,13 +969,17 @@ Go连接器支持的系统有:
|
|||
|
||||
**提示:建议Go版本是1.13及以上,并开启模块支持:**
|
||||
```sh
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.io,direct
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.io,direct
|
||||
```
|
||||
在taosdemo.go所在目录下进行编译和执行:
|
||||
```sh
|
||||
go mod init *demo*
|
||||
go build ./demo -h fqdn -p serverPort
|
||||
go mod init taosdemo
|
||||
go get github.com/taosdata/driver-go/taosSql
|
||||
# use win branch in Windows platform.
|
||||
#go get github.com/taosdata/driver-go/taosSql@win
|
||||
go build
|
||||
./taosdemo -h fqdn -p serverPort
|
||||
```
|
||||
|
||||
### Go连接器的使用
|
||||
|
|
|
@ -375,7 +375,7 @@ taos -C 或 taos --dump-config
|
|||
timezone GMT-8
|
||||
timezone Asia/Shanghai
|
||||
```
|
||||
均是合法的设置东八区时区的格式。
|
||||
均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。
|
||||
|
||||
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如:
|
||||
```sql
|
||||
|
@ -800,7 +800,7 @@ taos -n sync -P 6042 -h <fqdn of server>
|
|||
|
||||
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
|
||||
|
||||
从 2.1.7.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
|
||||
从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下:
|
||||
|
||||
-n:设为“speed”时,表示对网络速度进行诊断。
|
||||
-h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
|
||||
|
@ -809,6 +809,15 @@ taos -n sync -P 6042 -h <fqdn of server>
|
|||
-l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024*1024*1024,默认值为 1000。
|
||||
-S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。
|
||||
|
||||
#### FQDN 解析速度诊断
|
||||
|
||||
`taos -n fqdn -h <fqdn of server>`
|
||||
|
||||
从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下:
|
||||
|
||||
-n:设为“fqdn”时,表示对 FQDN 解析进行诊断。
|
||||
-h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。
|
||||
|
||||
#### 服务端日志
|
||||
|
||||
taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。
|
||||
|
|
|
@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传
|
|||
|
||||
显示当前数据库下的所有数据表信息。
|
||||
|
||||
说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
|
||||
|
||||
通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。
|
||||
|
||||
- **显示一个数据表的创建语句**
|
||||
|
||||
```mysql
|
||||
|
@ -718,15 +714,19 @@ Query OK, 1 row(s) in set (0.001091s)
|
|||
| = | equal to | all types |
|
||||
| <> | not equal to | all types |
|
||||
| between and | within a certain range | **`timestamp`** and all numeric types |
|
||||
| in | matches any value in a set | all types except first column `timestamp` |
|
||||
| in | match any value in a set | all types except first column `timestamp` |
|
||||
| like | match a wildcard string | **`binary`** **`nchar`** |
|
||||
| % | match with any char sequences | **`binary`** **`nchar`** |
|
||||
| _ | match with a single char | **`binary`** **`nchar`** |
|
||||
|
||||
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
|
||||
2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
|
||||
3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
|
||||
4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
|
||||
5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
|
||||
2. like 算子使用通配符字符串进行匹配检查。
|
||||
* 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。
|
||||
* 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
|
||||
3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
|
||||
4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
|
||||
5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
|
||||
6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。<!-- REPLACE_OPEN_TO_ENTERPRISE__IN_OPERATOR_AND_UNSIGNED_INTEGER -->
|
||||
|
||||
<a class="anchor" id="union"></a>
|
||||
### UNION ALL 操作符
|
||||
|
@ -1197,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
|
|||
|
||||
适用于:**表、超级表**。
|
||||
|
||||
说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
|
||||
|
||||
限制:LAST_ROW()不能与INTERVAL一起使用。
|
||||
|
||||
示例:
|
||||
|
|
|
@@ -71,7 +71,7 @@ TDengine is a highly efficient platform to store, query, and analyze time-series
## [Connector](/connector)

- [C/C++ Connector](/connector#c-cpp): primary method to connect to TDengine server through libtaos client library
- [Java Connector(JDBC)]: driver for connecting to the server from Java applications using the JDBC API
- [Java Connector(JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to TDengine server from Go applications
|
||||
|
|
|
@ -0,0 +1,525 @@
|
|||
# Java connector
|
||||
|
||||
## Introduction
|
||||
|
||||
The taos-jdbcdriver is implemented in two forms: JDBC-JNI and JDBC-RESTful (supported from taos-jdbcdriver-2.0.18). JDBC-JNI is implemented by calling the local methods of libtaos.so (or taos.dll) on the client, while JDBC-RESTful encapsulates the RESTful interface implementation internally.
|
||||
|
||||

|
||||
|
||||
The figure above shows the three ways a Java application can access TDengine:
|
||||
|
||||
* JDBC-JNI: The Java application uses JDBC-JNI's API on physical node1 (pnode1) and directly calls the client API (libtaos.so or taos.dll) to send write or query requests to the taosd instance on physical node2 (pnode2).
|
||||
* RESTful: The Java application sends the SQL to the RESTful connector on physical node2 (pnode2), which then calls the client API (libtaos.so).
|
||||
* JDBC-RESTful: The Java application uses the JDBC-restful API to encapsulate SQL into a RESTful request and send it to the RESTful connector of physical node 2.
|
||||
|
||||
In terms of implementation, TDengine's JDBC driver is kept as consistent as possible with the behavior of relational database drivers. However, because TDengine and relational databases differ in use cases and technical characteristics, taos-jdbcdriver still differs from a traditional JDBC driver in a few ways. The following points should be noted:
|
||||
|
||||
* Deleting a single record is not supported in TDengine.
* Transactions are not supported in TDengine.
|
||||
|
||||
### Difference between JDBC-JNI and JDBC-restful
|
||||
|
||||
<table>
|
||||
<tr align="center"><th>Difference</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
|
||||
<tr align="center">
|
||||
<td>Supported OS</td>
|
||||
<td>Linux, Windows</td>
|
||||
<td>all platforms</td>
|
||||
</tr>
|
||||
<tr align="center">
|
||||
<td>Whether to install the Client</td>
|
||||
<td>need</td>
|
||||
<td>do not need</td>
|
||||
</tr>
|
||||
<tr align="center">
|
||||
<td>Whether to upgrade the client after the server is upgraded</td>
|
||||
<td>need</td>
|
||||
<td>do not need</td>
|
||||
</tr>
|
||||
<tr align="center">
|
||||
<td>Write performance</td>
|
||||
<td colspan="2">JDBC-RESTful is 50% to 90% of JDBC-JNI</td>
|
||||
</tr>
|
||||
<tr align="center">
|
||||
<td>Read performance</td>
|
||||
<td colspan="2">JDBC-RESTful is no different from JDBC-JNI</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
**Note**: RESTful interfaces are stateless. Therefore, when using JDBC-restful, you should specify the database name in SQL before all table names and super table names, for example:
|
||||
|
||||
```sql
|
||||
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
|
||||
```
|
||||
|
||||
## JDBC driver version and supported TDengine and JDK versions
|
||||
|
||||
| taos-jdbcdriver | TDengine | JDK |
|
||||
| -------------------- | ----------------- | -------- |
|
||||
| 2.0.33 - 2.0.34 | 2.0.3.0 and above | 1.8.x |
|
||||
| 2.0.31 - 2.0.32 | 2.1.3.0 and above | 1.8.x |
|
||||
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
|
||||
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
|
||||
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
|
||||
| 1.0.3 | 1.6.1.x and above | 1.8.x |
|
||||
| 1.0.2 | 1.6.1.x and above | 1.8.x |
|
||||
| 1.0.1 | 1.6.1.x and above | 1.8.x |
|
||||
|
||||
## DataType in TDengine and Java connector
|
||||
|
||||
The TDengine supports the following data types and Java data types:
|
||||
|
||||
| TDengine DataType | Java DataType |
|
||||
| ----------------- | ------------------ |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT | java.lang.Short |
|
||||
| TINYINT | java.lang.Byte |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY | byte[] |
|
||||
| NCHAR | java.lang.String |
|
||||
|
||||
## Install Java connector
|
||||
|
||||
### Runtime Requirements
|
||||
|
||||
To run TDengine's Java connector, the following requirements shall be met:
|
||||
|
||||
1. A Linux or Windows System
|
||||
|
||||
2. Java Runtime Environment 1.8 or later
|
||||
|
||||
3. TDengine client (required for JDBC-JNI, not required for JDBC-RESTful)
|
||||
|
||||
**Note**:
|
||||
|
||||
* After the TDengine client is successfully installed on Linux, the libtaos.so file is automatically copied to /usr/lib/libtaos.so, which is included in the Linux automatic scan path and does not need to be specified separately.
|
||||
* After the TDengine client is installed on Windows, the taos.dll file that the driver package depends on is automatically copied to the default search path C:/Windows/System32. You do not need to specify it separately.
|
||||
|
||||
### Obtain the JDBC driver with Maven
|
||||
|
||||
For Java developers, TDengine provides `taos-jdbcdriver`, which implements the JDBC (3.0) API. You can find and download it from the [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver). Add the following dependency to the pom.xml of your Maven project.
|
||||
|
||||
```xml
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.34</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
```
|
||||
|
||||
### Obtain JDBC driver by compiling source code
|
||||
|
||||
You can download the TDengine source code and compile the latest version of the JDBC Connector.
|
||||
|
||||
```shell
|
||||
git clone https://github.com/taosdata/TDengine.git
|
||||
cd TDengine/src/connector/jdbc
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
```
|
||||
|
||||
A taos-jdbcdriver-2.0.xx-dist.jar will be generated in the target directory.
|
||||
|
||||
## Usage of the Java connector
|
||||
|
||||
### Establishing a Connection
|
||||
|
||||
#### Establishing a connection with URL
|
||||
|
||||
Establish the connection by specifying the URL, as shown below:
|
||||
|
||||
```java
|
||||
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
|
||||
In the example above, the JDBC-RESTful driver is used to establish a connection to hostname 'taosdemo.com', port 6041, and database 'test'. The URL specifies the user name 'root' and the password 'taosdata'.
|
||||
|
||||
JDBC-RESTful does not depend on the local client library. Compared with JDBC-JNI, only the following are required (see the sketch after this list):
|
||||
|
||||
* The driver class is "com.taosdata.jdbc.rs.RestfulDriver"
|
||||
* The JDBC URL starts with "jdbc:TAOS-RS://"
|
||||
* Use port 6041 as the connection port
|
||||
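A minimal sketch putting these three points together, reusing the demo host from the example above; explicitly loading the driver class is usually optional with JDBC 4+, but it makes the first requirement visible:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class RestfulConnectSketch {
    public static void main(String[] args) throws Exception {
        // 1. driver class for JDBC-RESTful (explicit loading is optional with JDBC 4+)
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        // 2. the URL starts with jdbc:TAOS-RS:// and 3. uses port 6041
        String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```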
|
||||
For better write and query performance, Java applications can use the JDBC-JNI driver, as shown below:
|
||||
|
||||
```java
|
||||
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
|
||||
In the example above, the JDBC-JNI driver is used to establish a connection to hostname 'taosdemo.com', port 6030 (TDengine's default port), and database 'test'. The URL specifies the user name 'root' and the password 'taosdata'.
|
||||
|
||||
<!-- You can also see the JDBC-JNI video tutorial: [JDBC connector of TDengine](https://www.taosdata.com/blog/2020/11/11/1955.html) -->
|
||||
|
||||
The format of the JDBC URL is:
|
||||
|
||||
```url
|
||||
jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]
|
||||
```
|
||||
|
||||
The configuration parameters in the URL are as follows:
|
||||
|
||||
* user: user name for logging in to TDengine. The default value is 'root'.
|
||||
* password: the user login password. The default value is 'taosdata'.
|
||||
* cfgdir: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
|
||||
* charset: character set used by the client. The default value is the system character set.
|
||||
* locale: client locale. The default value is the current system locale.
|
||||
* timezone: timezone used by the client. The default value is the current timezone of the system.
|
||||
* batchfetch: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is used. The default value is false.
|
||||
* timestampFormat: only valid for JDBC-RESTful. 'TIMESTAMP' to get a long value in a ResultSet; 'UTC' to get a UTC date-time string in a ResultSet; 'STRING' to get a local date-time string in a ResultSet. The default value is 'STRING'.
|
||||
* batchErrorIgnore: true to continue executing the remaining SQL statements when an error occurs while executing the executeBatch method of Statement; false to stop executing the remaining statements after an error. The default value is false. A URL sketch using these optional parameters follows this list.
|
||||
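As a sketch, the optional parameters above are appended to the URL query string just like user and password. The two URLs below are illustrative and reuse the demo host from the earlier examples; batchfetch is applied to the JDBC-JNI URL, while timestampFormat and batchErrorIgnore are applied to the JDBC-RESTful URL.

```java
// JDBC-JNI URL with batch ResultSet fetching enabled
String jniUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata&batchfetch=true";
// JDBC-RESTful URL requesting UTC timestamps and ignoring batch errors
String restUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"
        + "&timestampFormat=UTC&batchErrorIgnore=true";
Connection jniConn = DriverManager.getConnection(jniUrl);
Connection restConn = DriverManager.getConnection(restUrl);
```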
|
||||
#### Establishing a connection with URL and Properties
|
||||
|
||||
In addition to establishing the connection with the specified URL, you can also use Properties to specify the connection parameters, as shown below:
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
|
||||
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
|
||||
In the example above, JDBC-JNI is used to establish a connection to hostname 'taosdemo.com', port 6030, and database 'test'. The commented-out line shows the corresponding URL for JDBC-RESTful. The connection specifies the user name 'root' and the password 'taosdata' in the URL, and the character set, locale, time zone, and so on in connProps.
|
||||
|
||||
The configuration parameters in properties are as follows:
|
||||
|
||||
* TSDBDriver.PROPERTY_KEY_USER: user name for logging in to TDengine. The default value is 'root'.
|
||||
* TSDBDriver.PROPERTY_KEY_PASSWORD: the user login password. The default value is 'taosdata'.
|
||||
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file. It is valid only for JDBC-JNI. The default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
|
||||
* TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client. The default value is the system character set.
|
||||
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale.
|
||||
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system.
|
||||
* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is used. The default value is false.
|
||||
* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' to get a long value in a ResultSet; 'UTC' to get a UTC date-time string in a ResultSet; 'STRING' to get a local date-time string in a ResultSet. The default value is 'STRING'.
|
||||
* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true to continue executing the remaining SQL statements when an error occurs while executing the executeBatch method of Statement; false to stop executing the remaining statements after an error. The default value is false.
|
||||
|
||||
#### Establishing a connection with configuration file
|
||||
|
||||
When JDBC-JNI is used to connect to a TDengine cluster, you can specify the firstEp and secondEp parameters of the cluster in the client configuration file, as follows:
|
||||
|
||||
1. Do not specify the hostname and port in the Java application:
|
||||
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
|
||||
2. Specify firstEp and secondEp in the configuration file
|
||||
|
||||
```txt
|
||||
# first fully qualified domain name (FQDN) for TDengine system
|
||||
firstEp cluster_node1:6030
|
||||
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
|
||||
secondEp cluster_node2:6030
|
||||
```
|
||||
|
||||
In the example above, the JDBC driver uses the client configuration file to establish a connection to hostname 'cluster_node1', port 6030, and database 'test'. When the firstEp node in the cluster fails, JDBC tries to connect to the cluster using secondEp. In TDengine, as long as either firstEp or secondEp is valid, the connection to the cluster can be established.
|
||||
|
||||
**Note**: In this case, the configuration file belongs to the TDengine client running inside the Java application. The default path is '/etc/taos/taos.cfg' on Linux and 'C:/TDengine/cfg/taos.cfg' on Windows.
|
||||
|
||||
#### Priority of the parameters
|
||||
|
||||
If the same parameter is set in the URL, Properties, and client configuration file, the priority of these settings in descending order is as follows:
|
||||
|
||||
1. URL parameters
|
||||
2. Properties
|
||||
3. Client configuration file in taos.cfg
|
||||
|
||||
For example, if you specify the password as 'taosdata' in the URL and as 'taosdemo' in the Properties, JDBC will establish the connection using the password from the URL, as sketched below.
|
||||
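A minimal sketch of this precedence, reusing the demo host from the earlier examples: the URL already carries password=taosdata, so the conflicting value set in Properties is ignored.

```java
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Properties connProps = new Properties();
// lower priority than the URL, so this value is not used
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo");
// the connection is established with the URL password 'taosdata'
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
```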
|
||||
For details, see [Client Configuration](https://www.taosdata.com/en/documentation/administrator#client).
|
||||
|
||||
### Create database and table
|
||||
|
||||
```java
|
||||
Statement stmt = conn.createStatement();
|
||||
// create database
|
||||
stmt.executeUpdate("create database if not exists db");
|
||||
// use database
|
||||
stmt.executeUpdate("use db");
|
||||
// create table
|
||||
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
|
||||
```
|
||||
|
||||
### Insert
|
||||
|
||||
```java
|
||||
// insert data
|
||||
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
|
||||
System.out.println("insert " + affectedRows + " rows.");
|
||||
```
|
||||
|
||||
**Note**: 'now' is an internal system function whose default value is the current time of the machine where the client resides. 'now + 1s' means the current client time plus one second. The supported time units are a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), and y (year); a sketch using several of them follows.
|
||||
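A short sketch of these offsets, reusing the Statement and the table tb created earlier in this section; the offsets and values are illustrative.

```java
// each timestamp is derived from 'now' plus a different time unit
stmt.executeUpdate("insert into tb values(now + 500a, 21, 8.1)"); // + 500 milliseconds
stmt.executeUpdate("insert into tb values(now + 1m, 22, 8.5)");   // + 1 minute
stmt.executeUpdate("insert into tb values(now + 2h, 19, 7.9)");   // + 2 hours
```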
|
||||
### Query
|
||||
|
||||
```java
|
||||
// query data
|
||||
ResultSet resultSet = stmt.executeQuery("select * from tb");
|
||||
Timestamp ts = null;
|
||||
int temperature = 0;
|
||||
float humidity = 0;
|
||||
while(resultSet.next()){
|
||||
ts = resultSet.getTimestamp(1);
|
||||
temperature = resultSet.getInt(2);
|
||||
humidity = resultSet.getFloat("humidity");
|
||||
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
|
||||
}
|
||||
```
|
||||
|
||||
**Note**: Querying works the same way as in a relational database; note that the column index in ResultSet starts from 1.
|
||||
|
||||
### Handle exceptions
|
||||
|
||||
```java
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
// executeQuery
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
// print result
|
||||
printResult(resultSet);
|
||||
} catch (SQLException e) {
|
||||
System.out.println("ERROR Message: " + e.getMessage());
|
||||
System.out.println("ERROR Code: " + e.getErrorCode());
|
||||
e.printStackTrace();
|
||||
}
|
||||
```
|
||||
|
||||
The Java connector may report three types of error codes: JDBC driver errors (0x2301 to 0x2350), JNI method errors (0x2351 to 0x2400), and TDengine errors. A sketch of branching on these ranges follows the links below. For details about the error codes, see:
|
||||
|
||||
- https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
|
||||
- https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
|
||||
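A minimal sketch of distinguishing the three ranges via e.getErrorCode(); the boundaries are the ones stated above and the handling is illustrative only.

```java
try (Statement statement = connection.createStatement()) {
    statement.executeQuery(sql);
} catch (SQLException e) {
    int code = e.getErrorCode();
    if (code >= 0x2301 && code <= 0x2350) {
        System.out.println("JDBC driver error: " + e.getMessage());
    } else if (code >= 0x2351 && code <= 0x2400) {
        System.out.println("JNI error: " + e.getMessage());
    } else {
        System.out.println("TDengine error " + code + ": " + e.getMessage());
    }
}
```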
|
||||
### Write data through parameter binding
|
||||
|
||||
Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly improved parameter binding support for data write (INSERT) scenarios. Data can be written in the following way, avoiding SQL parsing and significantly improving write performance. (**Note**: parameter binding is not supported in JDBC-RESTful.)
|
||||
|
||||
```java
|
||||
Statement stmt = conn.createStatement();
|
||||
Random r = new Random();
|
||||
|
||||
// In the INSERT statement, the VALUES clause allows you to specify specific columns; if automatic table creation is used, the TAGS clause must set the parameter values of all TAGS columns
|
||||
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
|
||||
|
||||
s.setTableName("w1");
|
||||
|
||||
// set tags
|
||||
s.setTagInt(0, r.nextInt(10));
|
||||
s.setTagString(1, "Beijing");
|
||||
int numOfRows = 10;
|
||||
|
||||
// set values
|
||||
ArrayList<Long> ts = new ArrayList<>();
|
||||
for (int i = 0; i < numOfRows; i++){
|
||||
ts.add(System.currentTimeMillis() + i);
|
||||
}
|
||||
s.setTimestamp(0, ts);
|
||||
ArrayList<Integer> s1 = new ArrayList<>();
|
||||
for (int i = 0; i < numOfRows; i++){
|
||||
s1.add(r.nextInt(100));
|
||||
}
|
||||
s.setInt(1, s1);
|
||||
ArrayList<String> s2 = new ArrayList<>();
|
||||
for (int i = 0; i < numOfRows; i++){
|
||||
s2.add("test" + r.nextInt(100));
|
||||
}
|
||||
s.setString(2, s2, 10);
|
||||
|
||||
// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch
|
||||
s.columnDataAddBatch();
|
||||
s.columnDataExecuteBatch();
|
||||
// Clear the cache, after which you can bind new data(including table names, tags, values):
|
||||
s.columnDataClearBatch();
|
||||
s.columnDataCloseBatch();
|
||||
```
|
||||
|
||||
The methods used to set tags are:
|
||||
|
||||
```java
|
||||
public void setTagNull(int index, int type)
|
||||
public void setTagBoolean(int index, boolean value)
|
||||
public void setTagInt(int index, int value)
|
||||
public void setTagByte(int index, byte value)
|
||||
public void setTagShort(int index, short value)
|
||||
public void setTagLong(int index, long value)
|
||||
public void setTagTimestamp(int index, long value)
|
||||
public void setTagFloat(int index, float value)
|
||||
public void setTagDouble(int index, double value)
|
||||
public void setTagString(int index, String value)
|
||||
public void setTagNString(int index, String value)
|
||||
```
|
||||
|
||||
The methods used to set columns are:
|
||||
|
||||
```java
|
||||
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
|
||||
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
|
||||
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
|
||||
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
|
||||
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
|
||||
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
|
||||
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
|
||||
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
|
||||
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
|
||||
```
|
||||
|
||||
**Note**: Both setString and setNString require the size parameter to specify the column width declared for the corresponding column in the table definition.
|
||||
|
||||
### Data Subscription
|
||||
|
||||
#### Subscribe
|
||||
|
||||
```java
|
||||
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
|
||||
```
|
||||
|
||||
parameters:
|
||||
|
||||
* topic: the unique topic name of the subscription.
|
||||
* sql: a select statement.
|
||||
* restart: true to restart the subscription if it already exists; false to continue the previous subscription.
|
||||
|
||||
In the example above, a subscription named 'topic' is created which uses the SQL statement 'select * from meters'. If the subscription already exists, it continues from the previous query progress rather than consuming all the data from scratch.
|
||||
|
||||
#### Consume
|
||||
|
||||
```java
|
||||
int total = 0;
|
||||
while(true) {
|
||||
TSDBResultSet rs = sub.consume();
|
||||
int count = 0;
|
||||
while(rs.next()) {
|
||||
count++;
|
||||
}
|
||||
total += count;
|
||||
System.out.printf("%d rows consumed, total %d\n", count, total);
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
```
|
||||
|
||||
The consume method returns a result set containing all the new data since the last consume. Make sure to call consume only as often as you actually need the data (the example throttles it with Thread.sleep(1000)); otherwise you will put unnecessary stress on the server.
|
||||
|
||||
#### Close
|
||||
|
||||
```java
|
||||
sub.close(true);
|
||||
// release resources
|
||||
resultSet.close();
|
||||
stmt.close();
|
||||
conn.close();
|
||||
```
|
||||
|
||||
The close method closes a subscription. If the parameter is true, the subscription progress information is retained, and a subscription with the same name can be created later to continue consuming data. If false, the subscription progress is not retained.
|
||||
|
||||
**Note**: the connection must be closed; otherwise, a connection leak may occur.
|
||||
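One way to make sure these resources are always released is try-with-resources, which closes the ResultSet, Statement, and Connection even when an exception is thrown. This is a plain-JDBC sketch; jdbcUrl is the URL from the connection examples above and the query is just a placeholder.

```java
try (Connection conn = DriverManager.getConnection(jdbcUrl);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from test.tb")) {
    while (rs.next()) {
        // process the row
    }
} // all three resources are closed here, avoiding connection leaks
```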
|
||||
## Connection Pool
|
||||
|
||||
### HikariCP example
|
||||
|
||||
```java
|
||||
public static void main(String[] args) throws SQLException {
|
||||
HikariConfig config = new HikariConfig();
|
||||
// jdbc properties
|
||||
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
|
||||
config.setUsername("root");
|
||||
config.setPassword("taosdata");
|
||||
// connection pool configurations
|
||||
config.setMinimumIdle(10); //minimum number of idle connection
|
||||
config.setMaximumPoolSize(10); //maximum number of connection in the pool
|
||||
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
|
||||
config.setMaxLifetime(0); // maximum life time for each connection
|
||||
config.setIdleTimeout(0); // max idle time for recycle idle connection
|
||||
config.setConnectionTestQuery("select server_status()"); //validation query
|
||||
HikariDataSource ds = new HikariDataSource(config); //create datasource
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
//query or insert
|
||||
// ...
|
||||
connection.close(); // return the connection to the pool
|
||||
}
|
||||
```
|
||||
|
||||
### Druid example
|
||||
|
||||
```java
|
||||
public static void main(String[] args) throws Exception {
|
||||
DruidDataSource dataSource = new DruidDataSource();
|
||||
// jdbc properties
|
||||
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
|
||||
dataSource.setUrl(url);
|
||||
dataSource.setUsername("root");
|
||||
dataSource.setPassword("taosdata");
|
||||
// pool configurations
|
||||
dataSource.setInitialSize(10);
|
||||
dataSource.setMinIdle(10);
|
||||
dataSource.setMaxActive(10);
|
||||
dataSource.setMaxWait(30000);
|
||||
dataSource.setValidationQuery("select server_status()");
|
||||
Connection connection = dataSource.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
//query or insert
|
||||
// ...
|
||||
connection.close(); // return the connection to the pool
|
||||
}
|
||||
```
|
||||
|
||||
**Note**:
|
||||
|
||||
As of TDengine V1.6.4.1, the function select server_status() is supported specifically for heartbeat detection, so it is recommended to use select server_status() as the validation query when using connection pools.
|
||||
|
||||
Select server_status() returns 1 on success, as shown below.
|
||||
|
||||
```sql
|
||||
taos> select server_status();
|
||||
server_status()|
|
||||
================
|
||||
1 |
|
||||
Query OK, 1 row(s) in set (0.000141s)
|
||||
```
|
||||
|
||||
## Integration with frameworks
|
||||
|
||||
- Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate.
|
||||
- Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring Boot.
|
||||
|
||||
## Example Codes
|
||||
|
||||
You can find sample code here: [JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC)
|
||||
|
||||
## FAQ
|
||||
|
||||
- java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
|
||||
**Cause**: The application cannot find the native library *taos*.
|
||||
|
||||
**Answer**: On Windows, copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\`; on Linux, create a soft link with `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
|
||||
|
||||
- java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
**Cause**: Currently TDengine only supports 64-bit JDK.
|
||||
|
||||
**Answer**: Re-install a 64-bit JDK.
|
||||
|
||||
- For other questions, please refer to [Issues](https://github.com/taosdata/TDengine/issues)
|
||||
|
|
@ -194,6 +194,9 @@ keepColumnName 1
|
|||
# maximum number of rows returned by the restful interface
|
||||
# restfulRowLimit 10240
|
||||
|
||||
# database name must be specified in restful interface if the following parameter is set, off by default
|
||||
# httpDbNameMandatory 1
|
||||
|
||||
# The following parameter is used to limit the maximum number of lines in log files.
|
||||
# max number of lines per log filters
|
||||
# numOfLogLines 10000000
|
||||
|
|
|
@ -19,6 +19,7 @@ else
|
|||
fi
|
||||
|
||||
# Dynamic directory
|
||||
|
||||
data_dir="/var/lib/taos"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
|
@ -29,25 +30,32 @@ fi
|
|||
|
||||
data_link_dir="/usr/local/taos/data"
|
||||
log_link_dir="/usr/local/taos/log"
|
||||
|
||||
cfg_install_dir="/etc/taos"
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cfg_install_dir="/etc/taos"
|
||||
else
|
||||
cfg_install_dir="/usr/local/Cellar/tdengine/${verNumber}/taos"
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/taos"
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
install_main_dir="/usr/local/taos"
|
||||
else
|
||||
install_main_dir="/usr/local/Cellar/tdengine/${verNumber}"
|
||||
fi
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/taos/bin"
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_dir="/usr/local/taos/bin"
|
||||
else
|
||||
bin_dir="/usr/local/Cellar/tdengine/${verNumber}/bin"
|
||||
fi
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
|
@ -59,12 +67,11 @@ GREEN_UNDERLINE='\033[4;32m'
|
|||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
|
@ -137,17 +144,16 @@ function install_main_path() {
|
|||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/perfMonitor || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
|
||||
|
||||
${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
|
||||
${csudo} cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
|
||||
|
||||
|
@ -161,19 +167,17 @@ function install_bin() {
|
|||
${csudo} chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
|
||||
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
|
||||
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
|
||||
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
|
||||
[ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
fi
|
||||
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
else
|
||||
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
|
||||
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -220,7 +224,7 @@ function install_jemalloc() {
|
|||
fi
|
||||
|
||||
if [ -d /etc/ld.so.conf.d ]; then
|
||||
${csudo} echo "/usr/local/lib" > /etc/ld.so.conf.d/jemalloc.conf
|
||||
echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
|
||||
${csudo} ldconfig
|
||||
else
|
||||
echo "/etc/ld.so.conf.d not found!"
|
||||
|
@ -246,10 +250,8 @@ function install_lib() {
|
|||
fi
|
||||
else
|
||||
${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
|
||||
${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
|
||||
install_jemalloc
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
|
@ -259,10 +261,14 @@ function install_lib() {
|
|||
|
||||
function install_header() {
|
||||
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
fi
|
||||
${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
fi
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
|
@ -270,29 +276,30 @@ function install_config() {
|
|||
|
||||
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo} mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
|
||||
[ -f ${script_dir}/../cfg/taos.cfg ] &&
|
||||
${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
|
||||
${csudo} chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
|
||||
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
|
||||
fi
|
||||
}
|
||||
|
||||
function install_log() {
|
||||
${csudo} rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && chmod 777 ${log_dir}
|
||||
${csudo} rm -rf ${log_dir} || :
|
||||
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
${csudo} ln -s ${log_dir} ${install_main_dir}/log
|
||||
fi
|
||||
|
||||
${csudo} ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_data() {
|
||||
${csudo} mkdir -p ${data_dir}
|
||||
${csudo} ln -s ${data_dir} ${install_main_dir}/data
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} mkdir -p ${data_dir}
|
||||
${csudo} ln -s ${data_dir} ${install_main_dir}/data
|
||||
fi
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
|
@ -307,7 +314,6 @@ function install_connector() {
|
|||
echo "WARNING: go connector not found, please check if want to use it!"
|
||||
fi
|
||||
${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector
|
||||
|
||||
${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null
|
||||
}
|
||||
|
||||
|
@ -487,24 +493,24 @@ function install_TDengine() {
|
|||
else
|
||||
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
|
||||
fi
|
||||
|
||||
|
||||
install_main_path
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
install_data
|
||||
fi
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
install_connector
|
||||
install_examples
|
||||
|
||||
install_bin
|
||||
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
install_service
|
||||
fi
|
||||
|
||||
|
||||
install_config
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
name: tdengine
|
||||
base: core18
|
||||
version: '2.1.6.0'
|
||||
version: '2.1.7.2'
|
||||
icon: snap/gui/t-dengine.svg
|
||||
summary: an open-source big data platform designed and optimized for IoT.
|
||||
description: |
|
||||
|
@ -72,7 +72,7 @@ parts:
|
|||
- usr/bin/taosd
|
||||
- usr/bin/taos
|
||||
- usr/bin/taosdemo
|
||||
- usr/lib/libtaos.so.2.1.6.0
|
||||
- usr/lib/libtaos.so.2.1.7.2
|
||||
- usr/lib/libtaos.so.1
|
||||
- usr/lib/libtaos.so
|
||||
|
||||
|
|
|
@ -116,8 +116,17 @@ void bnCleanupDnodes() {
|
|||
|
||||
static void bnCheckDnodesSize(int32_t dnodesNum) {
|
||||
if (tsBnDnodes.maxSize <= dnodesNum) {
|
||||
tsBnDnodes.maxSize = dnodesNum * 2;
|
||||
tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
|
||||
int32_t maxSize = dnodesNum * 2;
|
||||
SDnodeObj** list1 = NULL;
|
||||
int32_t retry = 0;
|
||||
|
||||
while(list1 == NULL && retry++ < 3) {
|
||||
list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *));
|
||||
}
|
||||
if(list1) {
|
||||
tsBnDnodes.list = list1;
|
||||
tsBnDnodes.maxSize = maxSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -4,6 +4,8 @@ PROJECT(TDengine)
|
|||
INCLUDE_DIRECTORIES(inc)
|
||||
INCLUDE_DIRECTORIES(jni)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc)
|
||||
AUX_SOURCE_DIRECTORY(src SRC)
|
||||
|
||||
IF (TD_LINUX)
|
||||
|
|
|
@ -50,6 +50,12 @@ void tscUnlockByThread(int64_t *lockedBy);
|
|||
|
||||
int tsInsertInitialCheck(SSqlObj *pSql);
|
||||
|
||||
void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs);
|
||||
|
||||
void tscFreeRetrieveSup(SSqlObj *pSql);
|
||||
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -36,7 +36,7 @@ extern "C" {
|
|||
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
|
||||
|
||||
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
|
||||
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))
|
||||
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
|
||||
|
||||
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
|
||||
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
|
||||
|
@ -144,6 +144,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
|
|||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
|
||||
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
|
||||
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
|
||||
bool hasTagValOutput(SQueryInfo* pQueryInfo);
|
||||
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
|
||||
|
@ -190,6 +191,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo);
|
|||
void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArray* pExprList);
|
||||
|
||||
static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQueryInfo->fieldsInfo.numOfOutput; }
|
||||
int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo);
|
||||
|
||||
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize);
|
||||
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid);
|
||||
|
@ -214,6 +216,7 @@ SExprInfo* tscExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t function
|
|||
int16_t size);
|
||||
|
||||
size_t tscNumOfExprs(SQueryInfo* pQueryInfo);
|
||||
int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo);
|
||||
SExprInfo *tscExprGet(SQueryInfo* pQueryInfo, int32_t index);
|
||||
int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
|
||||
int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy);
|
||||
|
@ -362,6 +365,8 @@ STblCond* tsGetTableFilter(SArray* filters, uint64_t uid, int16_t idx);
|
|||
|
||||
void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id);
|
||||
|
||||
char* cloneCurrentDBName(SSqlObj* pSql);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -38,6 +38,11 @@ extern "C" {
|
|||
#include "qUtil.h"
|
||||
#include "tcmdtype.h"
|
||||
|
||||
typedef enum {
|
||||
TAOS_REQ_FROM_SHELL,
|
||||
TAOS_REQ_FROM_HTTP
|
||||
} SReqOrigin;
|
||||
|
||||
// forward declaration
|
||||
struct SSqlInfo;
|
||||
|
||||
|
@ -123,7 +128,7 @@ typedef struct {
|
|||
int32_t kvLen; // len of SKVRow
|
||||
} SMemRowInfo;
|
||||
typedef struct {
|
||||
uint8_t memRowType; // default is 0, that is SDataRow
|
||||
uint8_t memRowType; // default is 0, that is SDataRow
|
||||
uint8_t compareStat; // 0 no need, 1 need compare
|
||||
TDRowTLenT kvRowInitLen;
|
||||
SMemRowInfo *rowInfo;
|
||||
|
@ -340,6 +345,7 @@ typedef struct STscObj {
|
|||
SRpcCorEpSet *tscCorMgmtEpSet;
|
||||
pthread_mutex_t mutex;
|
||||
int32_t numOfObj; // number of sqlObj from this tscObj
|
||||
SReqOrigin from;
|
||||
} STscObj;
|
||||
|
||||
typedef struct SSubqueryState {
|
||||
|
@ -486,6 +492,7 @@ bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
|
|||
void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols);
|
||||
|
||||
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
||||
int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql);
|
||||
|
||||
int32_t tscInvalidOperationMsg(char *msg, const char *additionalInfo, const char *sql);
|
||||
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
|
||||
|
|
|
@ -363,15 +363,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
}
|
||||
|
||||
if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { // stmt insert
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
code = tscGetTableMeta(pSql, pTableMetaInfo);
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
} else {
|
||||
assert(code == TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
(*pSql->fp)(pSql->param, pSql, code);
|
||||
} else if (TSDB_QUERY_HAS_TYPE(pCmd->insertParam.insertType, TSDB_QUERY_TYPE_FILE_INSERT)) { // file insert
|
||||
tscImportDataFromFile(pSql);
|
||||
|
|
|
@ -35,6 +35,7 @@ typedef struct SCompareParam {
|
|||
|
||||
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
|
||||
int32_t ret = 0;
|
||||
|
||||
size_t size = taosArrayGetSize(columnIndexList);
|
||||
if (size > 0) {
|
||||
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
|
||||
|
@ -564,9 +565,11 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc
|
|||
(*hasPrev) = true;
|
||||
}
|
||||
|
||||
// tsdb_func_tag function only produce one row of result. Therefore, we need to copy the
|
||||
// output value to multiple rows
|
||||
static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t numOfRows) {
|
||||
if (numOfRows <= 1) {
|
||||
return ;
|
||||
return;
|
||||
}
|
||||
|
||||
for (int32_t k = 0; k < numOfOutput; ++k) {
|
||||
|
@ -574,12 +577,49 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
|
|||
continue;
|
||||
}
|
||||
|
||||
int32_t inc = numOfRows - 1; // tsdb_func_tag function only produce one row of result
|
||||
char* src = pCtx[k].pOutput;
|
||||
char* src = pCtx[k].pOutput;
|
||||
char* dst = pCtx[k].pOutput + pCtx[k].outputBytes;
|
||||
|
||||
for (int32_t i = 0; i < inc; ++i) {
|
||||
pCtx[k].pOutput += pCtx[k].outputBytes;
|
||||
memcpy(pCtx[k].pOutput, src, (size_t)pCtx[k].outputBytes);
|
||||
// Let's start from the second row, as the first row has result value already.
|
||||
for (int32_t i = 1; i < numOfRows; ++i) {
|
||||
memcpy(dst, src, (size_t)pCtx[k].outputBytes);
|
||||
dst += pCtx[k].outputBytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) {
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
pCtx[j].pInput = pDataPtr[j] + pCtx[j].inputBytes * rowIndex;
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
|
||||
} else {
|
||||
aAggs[functionId].mergeFunc(&pCtx[j]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
|
||||
for(int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
|
||||
} else {
|
||||
aAggs[functionId].xFinalize(&pCtx[j]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -588,52 +628,18 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
|
|||
SMultiwayMergeInfo* pInfo = pOperator->info;
|
||||
SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
|
||||
|
||||
char** add = calloc(pBlock->info.numOfCols, POINTER_BYTES);
|
||||
char** addrPtr = calloc(pBlock->info.numOfCols, POINTER_BYTES);
|
||||
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
|
||||
add[i] = pCtx[i].pInput;
|
||||
addrPtr[i] = pCtx[i].pInput;
|
||||
pCtx[i].size = 1;
|
||||
}
|
||||
|
||||
for(int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
if (pInfo->hasPrev) {
|
||||
if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aAggs[functionId].mergeFunc(&pCtx[j]);
|
||||
}
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
|
||||
} else {
|
||||
for(int32_t j = 0; j < numOfExpr; ++j) { // TODO refactor
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aAggs[functionId].xFinalize(&pCtx[j]);
|
||||
}
|
||||
doFinalizeResultImpl(pInfo, pCtx, numOfExpr);
|
||||
|
||||
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput);
|
||||
setTagValueForMultipleRows(pCtx, pOperator->numOfOutput, numOfRows);
|
||||
|
@ -643,7 +649,7 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
|
|||
for(int32_t j = 0; j < numOfExpr; ++j) {
|
||||
pCtx[j].pOutput += (pCtx[j].outputBytes * numOfRows);
|
||||
if (pCtx[j].functionId == TSDB_FUNC_TOP || pCtx[j].functionId == TSDB_FUNC_BOTTOM) {
|
||||
pCtx[j].ptsOutputBuf = pCtx[0].pOutput;
|
||||
if(j > 0) pCtx[j].ptsOutputBuf = pCtx[j - 1].pOutput;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -655,48 +661,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
|
|||
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aAggs[functionId].mergeFunc(&pCtx[j]);
|
||||
}
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
|
||||
}
|
||||
} else {
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
|
||||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfExpr; ++j) {
|
||||
int32_t functionId = pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
|
||||
|
||||
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aAggs[functionId].mergeFunc(&pCtx[j]);
|
||||
}
|
||||
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
|
||||
}
|
||||
|
||||
savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev);
|
||||
|
@ -704,11 +672,11 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
|
|||
|
||||
{
|
||||
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
|
||||
pCtx[i].pInput = add[i];
|
||||
pCtx[i].pInput = addrPtr[i];
|
||||
}
|
||||
}
|
||||
|
||||
tfree(add);
|
||||
tfree(addrPtr);
|
||||
}
|
||||
|
||||
static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
|
||||
|
@ -816,6 +784,8 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
|
|||
SLocalDataSource *pOneDataSrc = pMerger->pLocalDataSrc[pTree->pNode[0].index];
|
||||
bool sameGroup = true;
|
||||
if (pInfo->hasPrev) {
|
||||
|
||||
// todo refactor extract method
|
||||
int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
|
||||
|
||||
// if this row belongs to current result set group
|
||||
|
@ -955,9 +925,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
|
|||
break;
|
||||
}
|
||||
|
||||
bool sameGroup = true;
|
||||
if (pAggInfo->hasGroupColData) {
|
||||
bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
|
||||
if (!sameGroup) {
|
||||
sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
|
||||
if (!sameGroup && !pAggInfo->multiGroupResults) {
|
||||
*newgroup = true;
|
||||
pAggInfo->hasDataBlockForNewGroup = true;
|
||||
pAggInfo->pExistBlock = pBlock;
|
||||
|
@ -976,26 +947,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
|
|||
}
|
||||
|
||||
if (handleData) { // data in current group is all handled
|
||||
for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
|
||||
int32_t functionId = pAggInfo->binfo.pCtx[j].functionId;
|
||||
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (functionId < 0) {
|
||||
SUdfInfo* pUdfInfo = taosArrayGet(pAggInfo->udfInfo, -1 * functionId - 1);
|
||||
|
||||
doInvokeUdf(pUdfInfo, &pAggInfo->binfo.pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
aAggs[functionId].xFinalize(&pAggInfo->binfo.pCtx[j]);
|
||||
}
|
||||
|
||||
doFinalizeResultImpl(pAggInfo, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
|
||||
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
|
||||
pAggInfo->binfo.pRes->info.rows += numOfRows;
|
||||
|
||||
pAggInfo->binfo.pRes->info.rows += numOfRows;
|
||||
setTagValueForMultipleRows(pAggInfo->binfo.pCtx, pOperator->numOfOutput, numOfRows);
|
||||
}
|
||||
|
||||
|
@ -1019,71 +974,127 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
|
|||
return (pRes->info.rows != 0)? pRes:NULL;
|
||||
}
|
||||
|
||||
static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
|
||||
SSLimitOperatorInfo *pInfo = pOperator->info;
|
||||
assert(pInfo->currentGroupOffset >= 0);
|
||||
static void doHandleDataInCurrentGroup(SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t rowIndex) {
|
||||
if (pInfo->currentOffset > 0) {
|
||||
pInfo->currentOffset -= 1;
|
||||
} else {
|
||||
// discard the data rows in current group
|
||||
if (pInfo->limit.limit < 0 || (pInfo->limit.limit >= 0 && pInfo->rowsTotal < pInfo->limit.limit)) {
|
||||
size_t num1 = taosArrayGetSize(pInfo->pRes->pDataBlock);
|
||||
for (int32_t i = 0; i < num1; ++i) {
|
||||
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData *pDstInfoData = taosArrayGet(pInfo->pRes->pDataBlock, i);
|
||||
|
||||
SSDataBlock* pBlock = NULL;
|
||||
if (pInfo->currentGroupOffset == 0) {
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
|
||||
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
SColumnInfo *pColInfo = &pColInfoData->info;
|
||||
|
||||
char *pSrc = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
|
||||
char *pDst = (char *)pDstInfoData->pData + (pInfo->pRes->info.rows * pColInfo->bytes);
|
||||
|
||||
memcpy(pDst, pSrc, pColInfo->bytes);
|
||||
}
|
||||
|
||||
pInfo->rowsTotal += 1;
|
||||
pInfo->pRes->info.rows += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (*newgroup == false && pInfo->limit.limit > 0 && pInfo->rowsTotal >= pInfo->limit.limit) {
|
||||
while ((*newgroup) == false) { // ignore the remain blocks
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
|
||||
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
return NULL;
|
||||
static void ensureOutputBuf(SSLimitOperatorInfo * pInfo, SSDataBlock *pResultBlock, int32_t numOfRows) {
|
||||
if (pInfo->capacity < pResultBlock->info.rows + numOfRows) {
|
||||
int32_t total = pResultBlock->info.rows + numOfRows;
|
||||
|
||||
size_t num = taosArrayGetSize(pResultBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SColumnInfoData *pInfoData = taosArrayGet(pResultBlock->pDataBlock, i);
|
||||
|
||||
char *tmp = realloc(pInfoData->pData, total * pInfoData->info.bytes);
|
||||
if (tmp != NULL) {
|
||||
pInfoData->pData = tmp;
|
||||
} else {
|
||||
// todo handle the malloc failure
|
||||
}
|
||||
|
||||
pInfo->capacity = total;
|
||||
pInfo->threshold = (int64_t)(total * 0.8);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum {
|
||||
BLOCK_NEW_GROUP = 1,
|
||||
BLOCK_NO_GROUP = 2,
|
||||
BLOCK_SAME_GROUP = 3,
|
||||
};
|
||||
|
||||
static int32_t doSlimitImpl(SOperatorInfo* pOperator, SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock) {
|
||||
int32_t rowIndex = 0;
|
||||
|
||||
while (rowIndex < pBlock->info.rows) {
|
||||
int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
|
||||
|
||||
bool samegroup = true;
|
||||
if (pInfo->hasPrev) {
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColIndex *pIndex = taosArrayGet(pInfo->orderColumnList, i);
|
||||
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex);
|
||||
|
||||
SColumnInfo *pColInfo = &pColInfoData->info;
|
||||
|
||||
char *d = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
|
||||
int32_t ret = columnValueAscendingComparator(pInfo->prevRow[i], d, pColInfo->type, pColInfo->bytes);
|
||||
if (ret != 0) { // it is a new group
|
||||
samegroup = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return pBlock;
|
||||
if (!samegroup || !pInfo->hasPrev) {
|
||||
pInfo->ignoreCurrentGroup = false;
|
||||
savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, rowIndex, &pInfo->hasPrev);
|
||||
|
||||
pInfo->currentOffset = pInfo->limit.offset; // reset the offset value for a new group
|
||||
pInfo->rowsTotal = 0;
|
||||
|
||||
if (pInfo->currentGroupOffset > 0) {
|
||||
pInfo->ignoreCurrentGroup = true;
|
||||
pInfo->currentGroupOffset -= 1; // now we are in the next group data
|
||||
rowIndex += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
// A new group has arrived according to the result rows, and the group limitation has already reached.
|
||||
// Let's jump out of current loop and return immediately.
|
||||
if (pInfo->slimit.limit >= 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
return BLOCK_NO_GROUP;
|
||||
}
|
||||
|
||||
pInfo->groupTotal += 1;
|
||||
|
||||
// data in current group not allowed, return if current result does not belong to the previous group.And there
|
||||
// are results exists in current SSDataBlock
|
||||
if (!pInfo->multigroupResult && !samegroup && pInfo->pRes->info.rows > 0) {
|
||||
return BLOCK_NEW_GROUP;
|
||||
}
|
||||
|
||||
doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
|
||||
|
||||
} else { // handle the offset in the same group
|
||||
// All the data in current group needs to be discarded, due to the limit parameter in the SQL statement
|
||||
if (pInfo->ignoreCurrentGroup) {
|
||||
rowIndex += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
|
||||
}
|
||||
|
||||
rowIndex += 1;
|
||||
}
|
||||
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
|
||||
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
while(1) {
|
||||
if (*newgroup) {
|
||||
pInfo->currentGroupOffset -= 1;
|
||||
*newgroup = false;
|
||||
}
|
||||
|
||||
while ((*newgroup) == false) {
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
|
||||
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// now we have got the first data block of the next group.
|
||||
if (pInfo->currentGroupOffset == 0) {
|
||||
return pBlock;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return BLOCK_SAME_GROUP;
|
||||
}
|
||||
|
||||
SSDataBlock* doSLimit(void* param, bool* newgroup) {
@@ -1093,63 +1104,41 @@ SSDataBlock* doSLimit(void* param, bool* newgroup) {
}

SSLimitOperatorInfo *pInfo = pOperator->info;
pInfo->pRes->info.rows = 0;

if (pInfo->pPrevBlock != NULL) {
ensureOutputBuf(pInfo, pInfo->pRes, pInfo->pPrevBlock->info.rows);
int32_t ret = doSlimitImpl(pOperator, pInfo, pInfo->pPrevBlock);
assert(ret != BLOCK_NEW_GROUP);

pInfo->pPrevBlock = NULL;
}

assert(pInfo->currentGroupOffset >= 0);

while(1) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);

SSDataBlock *pBlock = NULL;
while (1) {
pBlock = skipGroupBlock(pOperator, newgroup);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}

if (*newgroup) { // a new group arrives
pInfo->groupTotal += 1;
pInfo->rowsTotal = 0;
pInfo->currentOffset = pInfo->limit.offset;
ensureOutputBuf(pInfo, pInfo->pRes, pBlock->info.rows);
int32_t ret = doSlimitImpl(pOperator, pInfo, pBlock);
if (ret == BLOCK_NEW_GROUP) {
pInfo->pPrevBlock = pBlock;
return pInfo->pRes;
}

assert(pInfo->currentGroupOffset == 0);
if (pOperator->status == OP_EXEC_DONE) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}

if (pInfo->currentOffset >= pBlock->info.rows) {
pInfo->currentOffset -= pBlock->info.rows;
} else {
if (pInfo->currentOffset == 0) {
break;
}

int32_t remain = (int32_t)(pBlock->info.rows - pInfo->currentOffset);
pBlock->info.rows = remain;

// move the remain rows of this data block to the front.
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);

int16_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData, pColInfoData->pData + bytes * pInfo->currentOffset, remain * bytes);
}

pInfo->currentOffset = 0;
break;
// now the number of rows in current group is enough, let's return to the invoke function
if (pInfo->pRes->info.rows > pInfo->threshold) {
return pInfo->pRes;
}
}

if (pInfo->slimit.limit > 0 && pInfo->groupTotal > pInfo->slimit.limit) { // reach the group limit, abort
return NULL;
}

if (pInfo->limit.limit > 0 && (pInfo->rowsTotal + pBlock->info.rows >= pInfo->limit.limit)) {
pBlock->info.rows = (int32_t)(pInfo->limit.limit - pInfo->rowsTotal);
pInfo->rowsTotal = pInfo->limit.limit;

if (pInfo->slimit.limit > 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
pOperator->status = OP_EXEC_DONE;
}

// setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
} else {
pInfo->rowsTotal += pBlock->info.rows;
}

return pBlock;
}

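The per-column memmove in doSLimit above is how the operator discards the first `offset` rows of a column-oriented block: each fixed-width column shifts its surviving values to the front. A minimal standalone sketch of that idea, with hypothetical names (Column, dropLeadingRows) that are not part of the patch:

#include <stdint.h>
#include <string.h>

typedef struct {
  char   *data;   /* rows stored contiguously, fixed-width values */
  int16_t bytes;  /* width of one value */
} Column;

/* Shift the remaining rows of every column to the front, as the patched loop
 * does with SColumnInfoData. Returns the new row count. */
static int32_t dropLeadingRows(Column *cols, int32_t numOfCols, int32_t rows, int32_t offset) {
  if (offset >= rows) return 0;
  int32_t remain = rows - offset;
  for (int32_t i = 0; i < numOfCols; ++i) {
    memmove(cols[i].data, cols[i].data + (size_t)cols[i].bytes * offset, (size_t)remain * cols[i].bytes);
  }
  return remain;
}
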
@ -1693,7 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
||||
|
||||
SInsertStatementParam* pInsertParam = &pCmd->insertParam;
|
||||
SInsertStatementParam *pInsertParam = &pCmd->insertParam;
|
||||
destroyTableNameList(pInsertParam);
|
||||
|
||||
pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks);
|
||||
|
@ -1777,6 +1777,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
}
|
||||
|
||||
_error:
|
||||
pParentSql->res.code = code;
|
||||
tfree(tokenBuf);
|
||||
tfree(line);
|
||||
taos_free_result(pSql);

@@ -32,9 +32,6 @@ typedef struct {
uint8_t type;
int16_t length;
char* value;

//===================================
uint32_t fieldSchemaIdx;
} TAOS_SML_KV;

typedef struct {
@@ -47,9 +44,6 @@ typedef struct {
// first kv must be timestamp
TAOS_SML_KV* fields;
int32_t fieldNum;

//================================
uint32_t schemaIdx;
} TAOS_SML_DATA_POINT;

typedef enum {
@@ -62,10 +56,23 @@ typedef enum {

typedef struct {
uint64_t id;

SHashObj* smlDataToSchema;
} SSmlLinesInfo;

//=================================================================================================

static uint64_t linesSmlHandleId = 0;

uint64_t genLinesSmlId() {
uint64_t id;

do {
id = atomic_add_fetch_64(&linesSmlHandleId, 1);
} while (id == 0);

return id;
}

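genLinesSmlId gives every schemaless-insert call a non-zero, process-wide unique id for log correlation; the do/while skips 0 if the counter ever wraps. A standalone C11 sketch of the same idea, using stdatomic in place of TDengine's internal atomic_add_fetch_64 helper (illustration only, not part of the patch):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t nextHandleId = 0;

static uint64_t genHandleId(void) {
  uint64_t id;
  do {
    /* atomic_fetch_add returns the old value, so +1 gives add-then-fetch */
    id = atomic_fetch_add(&nextHandleId, 1) + 1;
  } while (id == 0);   /* never hand out 0, even after wrap-around */
  return id;
}
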
int compareSmlColKv(const void* p1, const void* p2) {
TAOS_SML_KV* kv1 = (TAOS_SML_KV*)p1;
TAOS_SML_KV* kv2 = (TAOS_SML_KV*)p2;

@@ -168,11 +175,46 @@ static int32_t buildSmlKvSchema(TAOS_SML_KV* smlKv, SHashObj* hash, SArray* arra
taosHashPut(hash, field.name, tagKeyLen, &fieldIdx, sizeof(fieldIdx));
}

smlKv->fieldSchemaIdx = (uint32_t)fieldIdx;
uintptr_t valPointer = (uintptr_t)smlKv;
taosHashPut(info->smlDataToSchema, &valPointer, sizeof(uintptr_t), &fieldIdx, sizeof(fieldIdx));

return 0;
}

static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);

SStringBuilder sb; memset(&sb, 0, sizeof(sb));
char sTableName[TSDB_TABLE_NAME_LEN] = {0};
strtolower(sTableName, point->stableName);
taosStringBuilderAppendString(&sb, sTableName);
for (int j = 0; j < point->tagNum; ++j) {
taosStringBuilderAppendChar(&sb, ',');
TAOS_SML_KV* tagKv = point->tags + j;
char tagName[TSDB_COL_NAME_LEN] = {0};
strtolower(tagName, tagKv->key);
taosStringBuilderAppendString(&sb, tagName);
taosStringBuilderAppendChar(&sb, '=');
taosStringBuilderAppend(&sb, tagKv->value, tagKv->length);
}
size_t len = 0;
char* keyJoined = taosStringBuilderGetResult(&sb, &len);
MD5_CTX context;
MD5Init(&context);
MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
MD5Final(&context);
*tableNameLen = snprintf(tableName, *tableNameLen,
"t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
taosStringBuilderDestroy(&sb);
tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName);
return 0;
}

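getSmlMd5ChildTableName derives a deterministic child-table name: tags are sorted, lower-cased, joined as "stable,tag1=v1,tag2=v2", and the MD5 digest of that string becomes "t_" followed by 32 hex characters, so identical tag sets always map to the same subtable. A rough standalone equivalent, assuming OpenSSL's MD5() in place of the internal MD5_CTX and string-builder helpers (illustration only, not part of the patch):

#include <openssl/md5.h>
#include <stdio.h>
#include <string.h>

/* key: the already sorted, lower-cased "stable,tag1=v1,tag2=v2" string */
static void md5ChildTableName(const char* key, char* out, size_t outLen) {
  unsigned char digest[MD5_DIGEST_LENGTH];
  MD5((const unsigned char*)key, strlen(key), digest);

  size_t n = snprintf(out, outLen, "t_");
  for (int i = 0; i < MD5_DIGEST_LENGTH && n + 2 < outLen; ++i) {
    n += snprintf(out + n, outLen - n, "%02x", digest[i]);
  }
}
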
static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint, SArray* stableSchemas, SSmlLinesInfo* info) {
int32_t code = 0;
SHashObj* sname2shema = taosHashInit(32,
@ -203,6 +245,15 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
|
|||
|
||||
for (int j = 0; j < point->tagNum; ++j) {
|
||||
TAOS_SML_KV* tagKv = point->tags + j;
|
||||
if (!point->childTableName) {
|
||||
char childTableName[TSDB_TABLE_NAME_LEN];
|
||||
int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
|
||||
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
|
||||
point->childTableName = calloc(1, tableNameLen+1);
|
||||
strncpy(point->childTableName, childTableName, tableNameLen);
|
||||
point->childTableName[tableNameLen] = '\0';
|
||||
}
|
||||
|
||||
code = buildSmlKvSchema(tagKv, pStableSchema->tagHash, pStableSchema->tags, info);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" build data point schema failed. point no.: %d, tag key: %s", info->id, i, tagKv->key);
|
||||
|
@ -219,7 +270,8 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
|
|||
}
|
||||
}
|
||||
|
||||
point->schemaIdx = (uint32_t)stableIdx;
|
||||
uintptr_t valPointer = (uintptr_t)point;
|
||||
taosHashPut(info->smlDataToSchema, &valPointer, sizeof(uintptr_t), &stableIdx, sizeof(stableIdx));
|
||||
}
|
||||
|
||||
size_t numStables = taosArrayGetSize(stableSchemas);
|
||||
|
@@ -319,7 +371,22 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
buildColumnDescription(action->alterSTable.field, result+n, capacity-n, &outBytes);
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
code = taos_errno(res);
char* errStr = taos_errstr(res);
char* begin = strstr(errStr, "duplicated column names");
bool tscDupColNames = (begin != NULL);
if (code != TSDB_CODE_SUCCESS) {
tscError("SML:0x%"PRIx64" apply schema action. error: %s", info->id, errStr);
}
taos_free_result(res);

if (code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || code == TSDB_CODE_MND_TAG_ALREAY_EXIST || tscDupColNames) {
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
code = taos_errno(res2);
if (code != TSDB_CODE_SUCCESS) {
tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
}
taos_free_result(res2);
}
break;
}
case SCHEMA_ACTION_ADD_TAG: {
|
||||
|
@ -328,7 +395,22 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
|
|||
result+n, capacity-n, &outBytes);
|
||||
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
|
||||
code = taos_errno(res);
|
||||
char* errStr = taos_errstr(res);
|
||||
char* begin = strstr(errStr, "duplicated column names");
|
||||
bool tscDupColNames = (begin != NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
if (code == TSDB_CODE_MND_TAG_ALREAY_EXIST || code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || tscDupColNames) {
|
||||
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
|
||||
code = taos_errno(res2);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
|
||||
}
|
||||
taos_free_result(res2);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
|
||||
|
@ -337,7 +419,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
|
|||
capacity-n, &outBytes);
|
||||
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
|
||||
code = taos_errno(res);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
if (code == TSDB_CODE_MND_INVALID_COLUMN_LENGTH || code == TSDB_CODE_TSC_INVALID_COLUMN_LENGTH) {
|
||||
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
|
||||
code = taos_errno(res2);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
|
||||
}
|
||||
taos_free_result(res2);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
|
||||
|
@ -346,7 +440,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
|
|||
capacity-n, &outBytes);
|
||||
TAOS_RES* res = taos_query(taos, result); //TODO async doAsyncQuery
|
||||
code = taos_errno(res);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
if (code == TSDB_CODE_MND_INVALID_TAG_LENGTH || code == TSDB_CODE_TSC_INVALID_TAG_LENGTH) {
|
||||
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
|
||||
code = taos_errno(res2);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
|
||||
}
|
||||
taos_free_result(res2);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case SCHEMA_ACTION_CREATE_STABLE: {
|
||||
|
@ -375,7 +481,19 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf
|
|||
outBytes = snprintf(pos, freeBytes, ")");
|
||||
TAOS_RES* res = taos_query(taos, result);
|
||||
code = taos_errno(res);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%"PRIx64" apply schema action. error : %s", info->id, taos_errstr(res));
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
if (code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) {
|
||||
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
|
||||
code = taos_errno(res2);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("SML:0x%" PRIx64 " apply schema action. reset query cache. error: %s", info->id, taos_errstr(res2));
|
||||
}
|
||||
taos_free_result(res2);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@@ -385,7 +503,7 @@ static int32_t applySchemaAction(TAOS* taos, SSchemaAction* action, SSmlLinesInf

free(result);
if (code != 0) {
tscError("SML:0x%"PRIx64 "apply schema action failure. %s", info->id, tstrerror(code));
tscError("SML:0x%"PRIx64 " apply schema action failure. %s", info->id, tstrerror(code));
}
return code;
}

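Every schema-action case in applySchemaAction follows the same shape: run the DDL with taos_query, read taos_errno, and if the server reports that the column or tag already exists, issue RESET QUERY CACHE so the client-side table meta is refreshed. Condensed into one hedged helper (the function name is illustrative; the API calls and error codes are the ones used in the patch):

static int32_t runDdlAndResetCacheOnConflict(TAOS* taos, const char* ddl) {
  TAOS_RES* res = taos_query(taos, ddl);
  int32_t   code = taos_errno(res);
  taos_free_result(res);

  if (code == TSDB_CODE_MND_FIELD_ALREAY_EXIST || code == TSDB_CODE_MND_TAG_ALREAY_EXIST) {
    /* the object is already there; refresh cached metadata instead of failing */
    TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
    code = taos_errno(res2);
    taos_free_result(res2);
  }
  return code;
}
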
@ -398,70 +516,12 @@ static int32_t destroySmlSTableSchema(SSmlSTableSchema* schema) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
|
||||
int32_t code = 0;
|
||||
|
||||
STscObj *pObj = (STscObj *)taos;
|
||||
if (pObj == NULL || pObj->signature != pObj) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
}
|
||||
|
||||
tscDebug("SML:0x%"PRIx64" load table schema. super table name: %s", info->id, tableName);
|
||||
|
||||
char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
|
||||
strtolower(tableNameLowerCase, tableName);
|
||||
|
||||
char sql[256];
|
||||
snprintf(sql, 256, "describe %s", tableNameLowerCase);
|
||||
TAOS_RES* res = taos_query(taos, sql);
|
||||
code = taos_errno(res);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" describe table failure. %s", info->id, taos_errstr(res));
|
||||
taos_free_result(res);
|
||||
return code;
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
|
||||
if (pSql == NULL){
|
||||
tscError("failed to allocate memory, reason:%s", strerror(errno));
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return code;
|
||||
}
|
||||
pSql->pTscObj = taos;
|
||||
pSql->signature = pSql;
|
||||
pSql->fp = NULL;
|
||||
|
||||
SStrToken tableToken = {.z=tableNameLowerCase, .n=(uint32_t)strlen(tableNameLowerCase), .type=TK_ID};
|
||||
tGetToken(tableNameLowerCase, &tableToken.type);
|
||||
// Check if the table name available or not
|
||||
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
|
||||
code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
sprintf(pSql->cmd.payload, "table name is invalid");
|
||||
tscFreeSqlObj(pSql);
|
||||
return code;
|
||||
}
|
||||
|
||||
SName sname = {0};
|
||||
if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) {
|
||||
tscFreeSqlObj(pSql);
|
||||
return code;
|
||||
}
|
||||
char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
|
||||
memset(fullTableName, 0, tListLen(fullTableName));
|
||||
tNameExtractFullName(&sname, fullTableName);
|
||||
tscFreeSqlObj(pSql);
|
||||
|
||||
static int32_t fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
|
||||
schema->tags = taosArrayInit(8, sizeof(SSchema));
|
||||
schema->fields = taosArrayInit(64, sizeof(SSchema));
|
||||
schema->tagHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
|
||||
schema->fieldHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, false);
|
||||
|
||||
size_t size = 0;
|
||||
STableMeta* tableMeta = NULL;
|
||||
taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void **)&tableMeta, &size);
|
||||
|
||||
tstrncpy(schema->sTableName, tableName, strlen(tableName)+1);
|
||||
schema->precision = tableMeta->tableInfo.precision;
|
||||
for (int i=0; i<tableMeta->tableInfo.numOfColumns; ++i) {
|
||||
|
@ -484,9 +544,93 @@ int32_t loadTableMeta(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSm
|
|||
size_t tagIndex = taosArrayGetSize(schema->tags) - 1;
|
||||
taosHashPut(schema->tagHash, field.name, strlen(field.name), &tagIndex, sizeof(tagIndex));
|
||||
}
|
||||
tscDebug("SML:0x%"PRIx64 " load table meta succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
|
||||
tscDebug("SML:0x%"PRIx64 " load table schema succeed. table name: %s, columns number: %d, tag number: %d, precision: %d",
|
||||
info->id, tableName, tableMeta->tableInfo.numOfColumns, tableMeta->tableInfo.numOfTags, schema->precision);
|
||||
free(tableMeta); tableMeta = NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTableMeta, SSmlLinesInfo* info) {
|
||||
int32_t code = 0;
|
||||
int32_t retries = 0;
|
||||
STableMeta* tableMeta = NULL;
|
||||
while (retries++ < TSDB_MAX_REPLICA && tableMeta == NULL) {
|
||||
STscObj* pObj = (STscObj*)taos;
|
||||
if (pObj == NULL || pObj->signature != pObj) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
}
|
||||
|
||||
tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName);
|
||||
|
||||
char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
|
||||
strtolower(tableNameLowerCase, tableName);
|
||||
|
||||
char sql[256];
|
||||
snprintf(sql, 256, "describe %s", tableNameLowerCase);
|
||||
TAOS_RES* res = taos_query(taos, sql);
|
||||
code = taos_errno(res);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%" PRIx64 " describe table failure. %s", info->id, taos_errstr(res));
|
||||
taos_free_result(res);
|
||||
return code;
|
||||
}
|
||||
taos_free_result(res);
|
||||
|
||||
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
|
||||
if (pSql == NULL) {
|
||||
tscError("SML:0x%" PRIx64 " failed to allocate memory, reason:%s", info->id, strerror(errno));
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return code;
|
||||
}
|
||||
pSql->pTscObj = taos;
|
||||
pSql->signature = pSql;
|
||||
pSql->fp = NULL;
|
||||
|
||||
registerSqlObj(pSql);
|
||||
SStrToken tableToken = {.z = tableNameLowerCase, .n = (uint32_t)strlen(tableNameLowerCase), .type = TK_ID};
|
||||
tGetToken(tableNameLowerCase, &tableToken.type);
|
||||
// Check if the table name available or not
|
||||
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
|
||||
code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
sprintf(pSql->cmd.payload, "table name is invalid");
|
||||
tscFreeRegisteredSqlObj(pSql);
|
||||
return code;
|
||||
}
|
||||
|
||||
SName sname = {0};
|
||||
if ((code = tscSetTableFullName(&sname, &tableToken, pSql)) != TSDB_CODE_SUCCESS) {
|
||||
tscFreeRegisteredSqlObj(pSql);
|
||||
return code;
|
||||
}
|
||||
char fullTableName[TSDB_TABLE_FNAME_LEN] = {0};
|
||||
memset(fullTableName, 0, tListLen(fullTableName));
|
||||
tNameExtractFullName(&sname, fullTableName);
|
||||
tscFreeRegisteredSqlObj(pSql);
|
||||
|
||||
size_t size = 0;
|
||||
taosHashGetCloneExt(tscTableMetaMap, fullTableName, strlen(fullTableName), NULL, (void**)&tableMeta, &size);
|
||||
}
|
||||
|
||||
if (tableMeta != NULL) {
|
||||
*pTableMeta = tableMeta;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
tscError("SML:0x%" PRIx64 " failed to retrieve table meta. super table name: %s", info->id, tableName);
|
||||
return TSDB_CODE_TSC_NO_META_CACHED;
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t loadTableSchemaFromDB(TAOS* taos, char* tableName, SSmlSTableSchema* schema, SSmlLinesInfo* info) {
|
||||
int32_t code = 0;
|
||||
STableMeta* tableMeta = NULL;
|
||||
code = retrieveTableMeta(taos, tableName, &tableMeta, info);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
assert(tableMeta != NULL);
|
||||
fillDbSchema(tableMeta, tableName, schema, info);
|
||||
free(tableMeta);
|
||||
tableMeta = NULL;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -498,7 +642,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
|
|||
SSmlSTableSchema dbSchema;
|
||||
memset(&dbSchema, 0, sizeof(SSmlSTableSchema));
|
||||
|
||||
code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info);
|
||||
code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
|
||||
if (code == TSDB_CODE_MND_INVALID_TABLE_NAME) {
|
||||
SSchemaAction schemaAction = {0};
|
||||
schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
|
||||
|
@ -507,7 +651,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
|
|||
schemaAction.createSTable.tags = pointSchema->tags;
|
||||
schemaAction.createSTable.fields = pointSchema->fields;
|
||||
applySchemaAction(taos, &schemaAction, info);
|
||||
code = loadTableMeta(taos, pointSchema->sTableName, &dbSchema, info);
|
||||
code = loadTableSchemaFromDB(taos, pointSchema->sTableName, &dbSchema, info);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
|
||||
return code;
|
||||
|
@ -567,74 +711,6 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableName, int* tableNameLen,
|
||||
SSmlLinesInfo* info) {
|
||||
tscDebug("SML:0x%"PRIx64" taos_sml_insert get child table name through md5", info->id);
|
||||
qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
|
||||
|
||||
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
|
||||
char sTableName[TSDB_TABLE_NAME_LEN] = {0};
|
||||
strtolower(sTableName, point->stableName);
|
||||
taosStringBuilderAppendString(&sb, sTableName);
|
||||
for (int j = 0; j < point->tagNum; ++j) {
|
||||
taosStringBuilderAppendChar(&sb, ',');
|
||||
TAOS_SML_KV* tagKv = point->tags + j;
|
||||
char tagName[TSDB_COL_NAME_LEN] = {0};
|
||||
strtolower(tagName, tagKv->key);
|
||||
taosStringBuilderAppendString(&sb, tagName);
|
||||
taosStringBuilderAppendChar(&sb, '=');
|
||||
taosStringBuilderAppend(&sb, tagKv->value, tagKv->length);
|
||||
}
|
||||
size_t len = 0;
|
||||
char* keyJoined = taosStringBuilderGetResult(&sb, &len);
|
||||
MD5_CTX context;
|
||||
MD5Init(&context);
|
||||
MD5Update(&context, (uint8_t *)keyJoined, (uint32_t)len);
|
||||
MD5Final(&context);
|
||||
*tableNameLen = snprintf(tableName, *tableNameLen,
|
||||
"t_%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0],
|
||||
context.digest[1], context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
|
||||
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
|
||||
context.digest[12], context.digest[13], context.digest[14], context.digest[15]);
|
||||
taosStringBuilderDestroy(&sb);
|
||||
tscDebug("SML:0x%"PRIx64" child table name: %s", info->id, tableName);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int32_t changeChildTableTagValue(TAOS* taos, const char* cTableName, const char* tagName, TAOS_BIND* bind, SSmlLinesInfo* info) {
|
||||
char sql[512];
|
||||
sprintf(sql, "alter table %s set tag %s=?", cTableName, tagName);
|
||||
|
||||
int32_t code;
|
||||
TAOS_STMT* stmt = taos_stmt_init(taos);
|
||||
code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
|
||||
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_bind_param(stmt, bind);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_execute(stmt);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_close(stmt);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
return code;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, const char* sTableName,
|
||||
SArray* tagsSchema, SArray* tagsBind, SSmlLinesInfo* info) {
|
||||
size_t numTags = taosArrayGetSize(tagsSchema);
|
||||
|
@ -673,28 +749,28 @@ static int32_t creatChildTableIfNotExists(TAOS* taos, const char* cTableName, co
|
|||
free(sql);
|
||||
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_prepare returns %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_bind_param(stmt, TARRAY_GET_START(tagsBind));
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_bind_param returns %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_execute(stmt);
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_execute returns %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
|
||||
code = taos_stmt_close(stmt);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_close return %d:%s", info->id, code, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
return code;
|
||||
|
@ -726,27 +802,29 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols
|
|||
tscDebug("SML:0x%"PRIx64" insert rows into child table %s. num of rows: %zu", info->id, cTableName, taosArrayGetSize(rowsBind));
|
||||
|
||||
int32_t code = 0;
|
||||
int32_t try = 0;
|
||||
|
||||
TAOS_STMT* stmt = taos_stmt_init(taos);
|
||||
if (stmt == NULL) {
|
||||
tfree(sql);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
code = taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));
|
||||
tfree(sql);
|
||||
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_prepare return %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
|
||||
bool tryAgain = false;
|
||||
int32_t try = 0;
|
||||
do {
|
||||
code = taos_stmt_set_tbname(stmt, cTableName);
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_set_tbname return %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -755,31 +833,52 @@ static int32_t insertChildTableBatch(TAOS* taos, char* cTableName, SArray* cols
|
|||
TAOS_BIND* colsBinds = taosArrayGetP(rowsBind, i);
|
||||
code = taos_stmt_bind_param(stmt, colsBinds);
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_bind_param return %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
code = taos_stmt_add_batch(stmt);
|
||||
if (code != 0) {
|
||||
tfree(stmt);
|
||||
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
|
||||
tscError("SML:0x%"PRIx64" taos_stmt_add_batch return %d:%s", info->id, code, tstrerror(code));
|
||||
taos_stmt_close(stmt);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
code = taos_stmt_execute(stmt);
if (code != 0) {
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
tscError("SML:0x%"PRIx64" taos_stmt_execute return %d:%s, try:%d", info->id, code, tstrerror(code), try);
}
} while (code == TSDB_CODE_TDB_TABLE_RECONFIGURE && try++ < TSDB_MAX_REPLICA);

if (code != 0) {
tscError("SML:0x%"PRIx64" %s", info->id, taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
} else {
taos_stmt_close(stmt);
}
tryAgain = false;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID
|| code == TSDB_CODE_VND_INVALID_VGROUP_ID
|| code == TSDB_CODE_TDB_TABLE_RECONFIGURE
|| code == TSDB_CODE_APP_NOT_READY
|| code == TSDB_CODE_RPC_NETWORK_UNAVAIL) && try++ < TSDB_MAX_REPLICA) {
tryAgain = true;
}

if (code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
TAOS_RES* res2 = taos_query(taos, "RESET QUERY CACHE");
int32_t code2 = taos_errno(res2);
if (code2 != TSDB_CODE_SUCCESS) {
tscError("SML:0x%" PRIx64 " insert child table. reset query cache. error: %s", info->id, taos_errstr(res2));
}
taos_free_result(res2);
if (tryAgain) {
taosMsleep(50 * (2 << try));
}
}
if (code == TSDB_CODE_APP_NOT_READY || code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
if (tryAgain) {
taosMsleep( 50 * (2 << try));
}
}
} while (tryAgain);

taos_stmt_close(stmt);
return code;
}
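insertChildTableBatch now retries on transient errors (invalid table or vgroup id, table reconfigure, app not ready, network unavailable), optionally resets the query cache, and backs off with taosMsleep(50 * (2 << try)). The bare retry/backoff skeleton looks roughly like the sketch below, with the TDengine specifics abstracted behind callbacks; all names here are illustrative, not from the patch:

#include <stdbool.h>
#include <stdint.h>

/* execFn: runs one attempt and returns 0 on success or an error code.
 * isTransient: decides whether that code is worth retrying.
 * sleepMs: injected so the sketch stays free of platform calls. */
static int32_t runWithBackoff(int32_t (*execFn)(void*), void* arg,
                              bool (*isTransient)(int32_t),
                              void (*sleepMs)(int), int maxTries) {
  int32_t code = 0;
  int     try = 0;
  bool    tryAgain = false;
  do {
    code = execFn(arg);
    tryAgain = (code != 0) && isTransient(code) && (try++ < maxTries);
    if (tryAgain) {
      sleepMs(50 * (2 << try));   /* same exponential step as the patch */
    }
  } while (tryAgain);
  return code;
}
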
|
||||
|
||||
|
@ -787,16 +886,10 @@ static int32_t arrangePointsByChildTableName(TAOS_SML_DATA_POINT* points, int nu
|
|||
SHashObj* cname2points, SArray* stableSchemas, SSmlLinesInfo* info) {
|
||||
for (int32_t i = 0; i < numPoints; ++i) {
|
||||
TAOS_SML_DATA_POINT * point = points + i;
|
||||
if (!point->childTableName) {
|
||||
char childTableName[TSDB_TABLE_NAME_LEN];
|
||||
int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
|
||||
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
|
||||
point->childTableName = calloc(1, tableNameLen+1);
|
||||
strncpy(point->childTableName, childTableName, tableNameLen);
|
||||
point->childTableName[tableNameLen] = '\0';
|
||||
}
|
||||
|
||||
SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
|
||||
uintptr_t valPointer = (uintptr_t)point;
|
||||
size_t* pSchemaIndex = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
|
||||
assert(pSchemaIndex != NULL);
|
||||
SSmlSTableSchema* stableSchema = taosArrayGet(stableSchemas, *pSchemaIndex);
|
||||
|
||||
for (int j = 0; j < point->tagNum; ++j) {
|
||||
TAOS_SML_KV* kv = point->tags + j;
|
||||
|
@ -840,16 +933,10 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
|
|||
TAOS_SML_DATA_POINT * pDataPoint = taosArrayGetP(cTablePoints, i);
|
||||
for (int j = 0; j < pDataPoint->tagNum; ++j) {
|
||||
TAOS_SML_KV* kv = pDataPoint->tags + j;
|
||||
tagKVs[kv->fieldSchemaIdx] = kv;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t notNullTagsIndices[TSDB_MAX_TAGS] = {0};
|
||||
int32_t numNotNullTags = 0;
|
||||
for (int32_t i = 0; i < numTags; ++i) {
|
||||
if (tagKVs[i] != NULL) {
|
||||
notNullTagsIndices[numNotNullTags] = i;
|
||||
++numNotNullTags;
|
||||
uintptr_t valPointer = (uintptr_t)kv;
|
||||
size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
|
||||
assert(pFieldSchemaIdx != NULL);
|
||||
tagKVs[*pFieldSchemaIdx] = kv;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -863,7 +950,10 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
|
|||
for (int j = 0; j < numTags; ++j) {
|
||||
if (tagKVs[j] == NULL) continue;
|
||||
TAOS_SML_KV* kv = tagKVs[j];
|
||||
TAOS_BIND* bind = taosArrayGet(tagBinds, kv->fieldSchemaIdx);
|
||||
uintptr_t valPointer = (uintptr_t)kv;
|
||||
size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
|
||||
assert(pFieldSchemaIdx != NULL);
|
||||
TAOS_BIND* bind = taosArrayGet(tagBinds, *pFieldSchemaIdx);
|
||||
bind->buffer_type = kv->type;
|
||||
bind->length = malloc(sizeof(uintptr_t*));
|
||||
*bind->length = kv->length;
|
||||
|
@ -871,65 +961,8 @@ static int32_t applyChildTableTags(TAOS* taos, char* cTableName, char* sTableNam
|
|||
bind->is_null = NULL;
|
||||
}
|
||||
|
||||
// select tag1,tag2,... from stable where tbname in (ctable)
|
||||
char* sql = malloc(tsMaxSQLStringLen+1);
|
||||
int freeBytes = tsMaxSQLStringLen + 1;
|
||||
snprintf(sql, freeBytes, "select tbname, ");
|
||||
for (int i = 0; i < numNotNullTags ; ++i) {
|
||||
snprintf(sql + strlen(sql), freeBytes-strlen(sql), "%s,", tagKVs[notNullTagsIndices[i]]->key);
|
||||
}
|
||||
snprintf(sql + strlen(sql) - 1, freeBytes - strlen(sql) + 1,
|
||||
" from %s where tbname in (\'%s\')", sTableName, cTableName);
|
||||
sql[strlen(sql)] = '\0';
|
||||
int32_t code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info);
|
||||
|
||||
TAOS_RES* result = taos_query(taos, sql);
|
||||
free(sql);
|
||||
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" get child table %s tags failed. error string %s", info->id, cTableName, taos_errstr(result));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
// check tag value and set tag values if different
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
if (row != NULL) {
|
||||
int numFields = taos_field_count(result);
|
||||
TAOS_FIELD* fields = taos_fetch_fields(result);
|
||||
int* lengths = taos_fetch_lengths(result);
|
||||
for (int i = 1; i < numFields; ++i) {
|
||||
uint8_t dbType = fields[i].type;
|
||||
int32_t length = lengths[i];
|
||||
char* val = row[i];
|
||||
|
||||
TAOS_SML_KV* tagKV = tagKVs[notNullTagsIndices[i-1]];
|
||||
if (tagKV->type != dbType) {
|
||||
tscError("SML:0x%"PRIx64" child table %s tag %s type mismatch. point type : %d, db type : %d",
|
||||
info->id, cTableName, tagKV->key, tagKV->type, dbType);
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
assert(tagKV->value);
|
||||
|
||||
if (val == NULL || length != tagKV->length || memcmp(tagKV->value, val, length) != 0) {
|
||||
TAOS_BIND* bind = taosArrayGet(tagBinds, tagKV->fieldSchemaIdx);
|
||||
code = changeChildTableTagValue(taos, cTableName, tagKV->key, bind, info);
|
||||
if (code != 0) {
|
||||
tscError("SML:0x%"PRIx64" change child table tag failed. table name %s, tag %s", info->id, cTableName, tagKV->key);
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
}
|
||||
tscDebug("SML:0x%"PRIx64" successfully applied point tags. child table: %s", info->id, cTableName);
|
||||
} else {
|
||||
code = creatChildTableIfNotExists(taos, cTableName, sTableName, sTableSchema->tags, tagBinds, info);
|
||||
if (code != 0) {
|
||||
goto cleanup;
|
||||
}
|
||||
}
|
||||
|
||||
cleanup:
|
||||
taos_free_result(result);
|
||||
for (int i = 0; i < taosArrayGetSize(tagBinds); ++i) {
|
||||
TAOS_BIND* bind = taosArrayGet(tagBinds, i);
|
||||
free(bind->length);
|
||||
|
@ -963,7 +996,10 @@ static int32_t applyChildTableFields(TAOS* taos, SSmlSTableSchema* sTableSchema,
|
|||
}
|
||||
for (int j = 0; j < point->fieldNum; ++j) {
|
||||
TAOS_SML_KV* kv = point->fields + j;
|
||||
TAOS_BIND* bind = colBinds + kv->fieldSchemaIdx;
|
||||
uintptr_t valPointer = (uintptr_t)kv;
|
||||
size_t* pFieldSchemaIdx = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
|
||||
assert(pFieldSchemaIdx != NULL);
|
||||
TAOS_BIND* bind = colBinds + *pFieldSchemaIdx;
|
||||
bind->buffer_type = kv->type;
|
||||
bind->length = malloc(sizeof(uintptr_t*));
|
||||
*bind->length = kv->length;
|
||||
|
@ -1000,9 +1036,11 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t
|
|||
while (pCTablePoints) {
|
||||
SArray* cTablePoints = *pCTablePoints;
|
||||
|
||||
|
||||
TAOS_SML_DATA_POINT* point = taosArrayGetP(cTablePoints, 0);
|
||||
SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, point->schemaIdx);
|
||||
uintptr_t valPointer = (uintptr_t)point;
|
||||
size_t* pSchemaIndex = taosHashGet(info->smlDataToSchema, &valPointer, sizeof(uintptr_t));
|
||||
assert(pSchemaIndex != NULL);
|
||||
SSmlSTableSchema* sTableSchema = taosArrayGet(stableSchemas, *pSchemaIndex);
|
||||
|
||||
tscDebug("SML:0x%"PRIx64" apply child table tags. child table: %s", info->id, point->childTableName);
|
||||
code = applyChildTableTags(taos, point->childTableName, point->stableName, sTableSchema, cTablePoints, info);
|
||||
|
@ -1014,7 +1052,7 @@ static int32_t applyDataPoints(TAOS* taos, TAOS_SML_DATA_POINT* points, int32_t
|
|||
tscDebug("SML:0x%"PRIx64" apply child table points. child table: %s", info->id, point->childTableName);
|
||||
code = applyChildTableFields(taos, sTableSchema, point->childTableName, cTablePoints, info);
|
||||
if (code != 0) {
|
||||
tscError("Apply child table fields failed. child table %s, error %s", point->childTableName, tstrerror(code));
|
||||
tscError("SML:0x%"PRIx64" Apply child table fields failed. child table %s, error %s", info->id, point->childTableName, tstrerror(code));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
|
@@ -1034,10 +1072,11 @@ cleanup:
return code;
}

int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) {
int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info) {
tscDebug("SML:0x%"PRIx64" taos_sml_insert. number of points: %d", info->id, numPoint);

int32_t code = TSDB_CODE_SUCCESS;
info->smlDataToSchema = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, false);

tscDebug("SML:0x%"PRIx64" build data point schemas", info->id);
SArray* stableSchemas = taosArrayInit(32, sizeof(SSmlSTableSchema)); // SArray<STableColumnsSchema>
@@ -1067,6 +1106,15 @@ clean_up:
taosArrayDestroy(schema->tags);
}
taosArrayDestroy(stableSchemas);
taosHashCleanup(info->smlDataToSchema);
return code;
}

int taos_sml_insert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint) {
SSmlLinesInfo* info = calloc(1, sizeof(SSmlLinesInfo));
info->id = genLinesSmlId();
int code = tscSmlInsert(taos, points, numPoint, info);
free(info);
return code;
}

@ -2076,18 +2124,6 @@ int32_t tscParseLine(const char* sql, TAOS_SML_DATA_POINT* smlData, SSmlLinesInf
|
|||
|
||||
//=========================================================================
|
||||
|
||||
static uint64_t linesSmlHandleId = 0;
|
||||
|
||||
uint64_t genLinesSmlId() {
|
||||
uint64_t id;
|
||||
|
||||
do {
|
||||
id = atomic_add_fetch_64(&linesSmlHandleId, 1);
|
||||
} while (id == 0);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
void destroySmlDataPoint(TAOS_SML_DATA_POINT* point) {
|
||||
for (int i=0; i<point->tagNum; ++i) {
|
||||
free((point->tags+i)->key);

@@ -2157,7 +2193,7 @@ int taos_insert_lines(TAOS* taos, char* lines[], int numLines) {
}

TAOS_SML_DATA_POINT* points = TARRAY_GET_START(lpPoints);
code = taos_sml_insert(taos, points, (int)numPoints, info);
code = tscSmlInsert(taos, points, (int)numPoints, info);
if (code != 0) {
tscError("SML:0x%"PRIx64" taos_sml_insert error: %s", info->id, tstrerror((code)));
}
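taos_insert_lines is the public entry point for the schemaless path patched above. A hedged usage sketch of how a client might call it, assuming a reachable taosd, default credentials, an existing test database, and the TDengine 2.x line-protocol notation for the sample line; none of this is part of the patch itself:

#include <taos.h>
#include <stdio.h>

int insert_lines_demo(void) {
  TAOS* taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) {
    printf("failed to connect\n");
    return -1;
  }

  char* lines[] = {
    "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000"
  };
  int code = taos_insert_lines(taos, lines, 1);
  if (code != 0) {
    printf("taos_insert_lines failed, code: %d\n", code);
  }
  taos_close(taos);
  return code;
}
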
|
||||
|
|
|
@ -206,6 +206,8 @@ static int normalStmtPrepare(STscStmt* stmt) {
|
|||
return code;
|
||||
}
|
||||
start = i + token.n;
|
||||
} else if (token.type == TK_ILLEGAL) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "invalid sql");
|
||||
}
|
||||
|
||||
i += token.n;
|
||||
|
@@ -1527,8 +1529,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
pCmd->insertParam.objectId = pSql->self;

pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);

char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);

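The hunk above, like the other realloc call sites touched by this commit, switches to a leak-safe realloc idiom: keep the old pointer until realloc is known to have succeeded, and free it explicitly on failure. A minimal sketch of that idiom with an illustrative helper name (not part of the patch):

#include <stdlib.h>

/* Grow *pBuf to newSize. On failure the old buffer is freed and *pBuf is set
 * to NULL, so the caller never loses track of the allocation. Returns 0 on
 * success, -1 on failure. */
static int reallocOrFree(char** pBuf, size_t newSize) {
  char* p = realloc(*pBuf, newSize);
  if (p == NULL) {
    free(*pBuf);      /* realloc left the old block untouched; release it */
    *pBuf = NULL;
    return -1;
  }
  *pBuf = p;
  return 0;
}
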
@ -1537,6 +1540,8 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
pRes->qId = 0;
|
||||
pRes->numOfRows = 1;
|
||||
|
||||
registerSqlObj(pSql);
|
||||
|
||||
strtolower(pSql->sqlstr, sql);
|
||||
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||
|
||||
|
@ -1546,8 +1551,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
pSql->cmd.insertParam.numOfParams = 0;
|
||||
pSql->cmd.batchSize = 0;
|
||||
|
||||
registerSqlObj(pSql);
|
||||
|
||||
int32_t ret = stmtParseInsertTbTags(pSql, pStmt);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
STMT_RET(ret);
|
||||
|
|
|
@ -72,7 +72,6 @@ static int convertTimestampStrToInt64(tVariant *pVar, int32_t precision);
|
|||
static bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, uint8_t precision);
|
||||
|
||||
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
|
||||
static char* cloneCurrentDBName(SSqlObj* pSql);
|
||||
static int32_t getDelimiterIndex(SStrToken* pTableName);
|
||||
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
|
||||
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
|
||||
|
@ -117,7 +116,7 @@ static int32_t validateColumnName(char* name);
|
|||
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
|
||||
static int32_t setCompactVnodeInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
|
||||
|
||||
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
|
||||
static int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
|
||||
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
|
||||
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
|
||||
|
||||
|
@ -429,7 +428,6 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
|
|||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
close(fd);
|
||||
tfree(*buf);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -895,6 +893,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
case TSDB_SQL_SELECT: {
|
||||
const char * msg1 = "no nested query supported in union clause";
|
||||
code = loadAllTableMeta(pSql, pInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -908,6 +907,10 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size);
|
||||
|
||||
if (size > 1 && pSqlNode->from && pSqlNode->from->type == SQL_NODE_FROM_SUBQUERY) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
}
|
||||
|
||||
// normalizeSqlNode(pSqlNode); // normalize the column name in each function
|
||||
if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -928,7 +931,6 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
pQueryInfo = pCmd->active;
|
||||
pQueryInfo->pUdfInfo = pUdfInfo;
|
||||
pQueryInfo->udfCopy = true;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1082,12 +1084,13 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
|
|||
const char* msg1 = "sliding cannot be used without interval";
|
||||
const char* msg2 = "interval cannot be less than 1 us";
|
||||
const char* msg3 = "interval value is too small";
|
||||
const char* msg4 = "only point interpolation query requires keyword EVERY";
|
||||
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
|
||||
if (!TPARSER_HAS_TOKEN(pSqlNode->interval.interval)) {
|
||||
if (TPARSER_HAS_TOKEN(pSqlNode->sliding)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
|
@ -1113,7 +1116,6 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
|
|||
}
|
||||
|
||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
|
||||
|
||||
// interval cannot be less than 10 milliseconds
|
||||
if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
|
@ -1128,9 +1130,15 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
|
|||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
bool interpQuery = tscIsPointInterpQuery(pQueryInfo);
|
||||
if ((pSqlNode->interval.token == TK_EVERY && (!interpQuery)) || (pSqlNode->interval.token == TK_INTERVAL && interpQuery)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
// The following part is used to check for the invalid query expression.
|
||||
return checkInvalidExprForTimeWindow(pCmd, pQueryInfo);
|
||||
}
|
||||
|
||||
static int32_t validateStateWindowNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, bool isStable) {
|
||||
|
||||
const char* msg1 = "invalid column name";
|
||||
|
@ -1537,9 +1545,7 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
|
|||
/*
|
||||
* tags name /column name is truncated in sql.y
|
||||
*/
|
||||
bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
||||
//const char* msg1 = "timestamp not allowed in tags";
|
||||
const char* msg2 = "duplicated column names";
|
||||
int32_t validateOneTag(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
||||
const char* msg3 = "tag length too long";
|
||||
const char* msg4 = "invalid tag name";
|
||||
const char* msg5 = "invalid binary/nchar tag length";
|
||||
|
@ -1554,8 +1560,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
|||
|
||||
// no more max columns
|
||||
if (numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
|
||||
}
|
||||
|
||||
// no more than 6 tags
|
||||
|
@ -1563,8 +1568,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
|||
char msg[128] = {0};
|
||||
sprintf(msg, "tags no more than %d", TSDB_MAX_TAGS);
|
||||
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||
}
|
||||
|
||||
// no timestamp allowable
|
||||
|
@ -1574,8 +1578,7 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
|||
//}
|
||||
|
||||
if ((pTagField->type < TSDB_DATA_TYPE_BOOL) || (pTagField->type > TSDB_DATA_TYPE_UBIGINT)) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
|
||||
|
@ -1587,20 +1590,17 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
|||
|
||||
// length less than TSDB_MAX_TASG_LEN
|
||||
if (nLen + pTagField->bytes > TSDB_MAX_TAGS_LEN) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
||||
// tags name can not be a keyword
|
||||
if (validateColumnName(pTagField->name) != TSDB_CODE_SUCCESS) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
// binary(val), val can not be equalled to or less than 0
|
||||
if ((pTagField->type == TSDB_DATA_TYPE_BINARY || pTagField->type == TSDB_DATA_TYPE_NCHAR) && pTagField->bytes <= 0) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
}
|
||||
|
||||
// field name must be unique
|
||||
|
@ -1608,17 +1608,16 @@ bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField) {
|
|||
|
||||
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
|
||||
if (strncasecmp(pTagField->name, pSchema[i].name, sizeof(pTagField->name) - 1) == 0) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return false;
|
||||
//return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pTagField->name, NULL);
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
|
||||
int32_t validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
|
||||
const char* msg1 = "too many columns";
|
||||
const char* msg2 = "duplicated column names";
|
||||
const char* msg3 = "column length too long";
|
||||
const char* msg4 = "invalid data type";
|
||||
const char* msg5 = "invalid column name";
|
||||
|
@ -1633,18 +1632,15 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
|
|||
|
||||
// no more max columns
|
||||
if (numOfCols >= TSDB_MAX_COLUMNS || numOfTags + numOfCols >= TSDB_MAX_COLUMNS) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
}
|
||||
|
||||
if (pColField->type < TSDB_DATA_TYPE_BOOL || pColField->type > TSDB_DATA_TYPE_UBIGINT) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
if (validateColumnName(pColField->name) != TSDB_CODE_SUCCESS) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
}
|
||||
|
||||
SSchema* pSchema = tscGetTableSchema(pTableMeta);
|
||||
|
@ -1655,25 +1651,23 @@ bool validateOneColumn(SSqlCmd* pCmd, TAOS_FIELD* pColField) {
|
|||
}
|
||||
|
||||
if (pColField->bytes <= 0) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
// length less than TSDB_MAX_BYTES_PER_ROW
|
||||
if (nLen + pColField->bytes > TSDB_MAX_BYTES_PER_ROW) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
return false;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
||||
// field name must be unique
|
||||
for (int32_t i = 0; i < numOfTags + numOfCols; ++i) {
|
||||
if (strncasecmp(pColField->name, pSchema[i].name, sizeof(pColField->name) - 1) == 0) {
|
||||
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
return false;
|
||||
//return tscErrorMsgWithCode(TSDB_CODE_TSC_DUP_COL_NAMES, tscGetErrorMsgPayload(pCmd), pColField->name, NULL);
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "duplicated column names");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/* is contained in pFieldList or not */
|
||||
|
@ -1689,14 +1683,6 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
|
|||
|
||||
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
|
||||
|
||||
static char* cloneCurrentDBName(SSqlObj* pSql) {
|
||||
pthread_mutex_lock(&pSql->pTscObj->mutex);
|
||||
char *p = strdup(pSql->pTscObj->db);
|
||||
pthread_mutex_unlock(&pSql->pTscObj->mutex);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/* length limitation, strstr cannot be applied */
|
||||
static int32_t getDelimiterIndex(SStrToken* pTableName) {
|
||||
for (uint32_t i = 0; i < pTableName->n; ++i) {
|
||||
|
@ -2052,9 +2038,10 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
|
|||
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
|
||||
}
|
||||
|
||||
|
||||
bool hasDistinct = false;
|
||||
bool hasAgg = false;
|
||||
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
|
||||
size_t numOfExpr = taosArrayGetSize(pSelNodeList);
|
||||
int32_t distIdx = -1;
|
||||
for (int32_t i = 0; i < numOfExpr; ++i) {
|
||||
int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo);
|
||||
|
@ -2109,7 +2096,6 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
//TODO(dengyihao), refactor as function
|
||||
//handle distinct func mixed with other func
|
||||
if (hasDistinct == true) {
|
||||
|
@ -2125,6 +2111,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
|
|||
if (pQueryInfo->pDownstream != NULL) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8);
|
||||
}
|
||||
|
||||
pQueryInfo->distinct = true;
|
||||
}
|
||||
|
||||
|
@ -2609,13 +2596,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
// set the first column ts for diff query
|
||||
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||
colIndex += 1;
|
||||
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
|
||||
TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
|
||||
|
||||
SColumnList ids = createColumnList(1, 0, 0);
|
||||
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
|
||||
insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
|
||||
}
|
||||
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
|
||||
|
@ -2649,7 +2635,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO);
|
||||
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI);
|
||||
}
|
||||
}
|
||||
|
||||
if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
|
||||
|
@ -2683,8 +2669,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
assert(ids.num == 1);
|
||||
tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema);
|
||||
}
|
||||
|
||||
tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -2888,7 +2874,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
|
||||
SColumnList ids = createColumnList(1, index.tableIndex, TS_COLUMN_INDEX);
|
||||
insertResultField(pQueryInfo, TS_COLUMN_INDEX, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
|
||||
insertResultField(pQueryInfo, colIndex, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP,
|
||||
aAggs[TSDB_FUNC_TS].name, pExpr);
|
||||
|
||||
colIndex += 1; // the first column is ts
|
||||
|
@ -3066,7 +3052,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s);
|
||||
}
|
||||
}
|
||||
|
||||
tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -4718,7 +4703,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql
|
|||
}
|
||||
|
||||
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
|
||||
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg);
|
||||
ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload);
|
||||
*pExpr = NULL;
|
||||
if (type) {
|
||||
*type |= TSQL_EXPR_JOIN;
|
||||
|
@ -5141,10 +5126,6 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
|
|||
tSqlExprDestroy(pCondExpr->pTableCond);
|
||||
}
|
||||
|
||||
if (pCondExpr->pTagCond) {
|
||||
tSqlExprDestroy(pCondExpr->pTagCond);
|
||||
}
|
||||
|
||||
if (pCondExpr->pColumnCond) {
|
||||
tSqlExprDestroy(pCondExpr->pColumnCond);
|
||||
}
|
||||
|
@ -5701,6 +5682,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
|
|||
const char* msg3 = "top/bottom not support fill";
|
||||
const char* msg4 = "illegal value or data overflow";
|
||||
const char* msg5 = "fill only available for interval query";
|
||||
const char* msg6 = "not supported function now";
|
||||
|
||||
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
|
@ -5739,6 +5721,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
|
|||
}
|
||||
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_PREV;
|
||||
if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
} else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_NEXT;
|
||||
} else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) {
|
||||
|
@ -5843,14 +5828,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
|
|||
const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column";
|
||||
const char* msg8 = "only column in groupby clause allowed as order column";
|
||||
const char* msg9 = "orderby column must projected in subquery";
|
||||
const char* msg10 = "not support distinct mixed with order by";
|
||||
|
||||
setDefaultOrderInfo(pQueryInfo);
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pSqlNode->pSortOrder == NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
char* pMsgBuf = tscGetErrorMsgPayload(pCmd);
|
||||
SArray* pSortOrder = pSqlNode->pSortOrder;
|
||||
|
||||
|
@ -5870,6 +5854,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
|
|||
return invalidOperationMsg(pMsgBuf, msg2);
|
||||
}
|
||||
}
|
||||
if (size > 0 && pQueryInfo->distinct) {
|
||||
return invalidOperationMsg(pMsgBuf, msg10);
|
||||
}
|
||||
|
||||
// handle the first part of order by
|
||||
tVariant* pVar = taosArrayGet(pSortOrder, 0);
|
||||
|
@ -5938,10 +5925,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
|
|||
pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId;
|
||||
} else if (isTopBottomQuery(pQueryInfo)) {
|
||||
/* order of top/bottom query in interval is not valid */
|
||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
|
||||
|
||||
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
|
||||
assert(pos > 0);
|
||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
|
||||
assert(pExpr->base.functionId == TSDB_FUNC_TS);
|
||||
|
||||
pExpr = tscExprGet(pQueryInfo, 1);
|
||||
pExpr = tscExprGet(pQueryInfo, pos);
|
||||
|
||||
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
return invalidOperationMsg(pMsgBuf, msg5);
|
||||
}
|
||||
|
@ -6032,11 +6023,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
|
|||
return invalidOperationMsg(pMsgBuf, msg8);
|
||||
}
|
||||
} else {
|
||||
/* order of top/bottom query in interval is not valid */
|
||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
|
||||
int32_t pos = tscExprTopBottomIndex(pQueryInfo);
|
||||
assert(pos > 0);
|
||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, pos - 1);
|
||||
assert(pExpr->base.functionId == TSDB_FUNC_TS);
|
||||
|
||||
pExpr = tscExprGet(pQueryInfo, 1);
|
||||
pExpr = tscExprGet(pQueryInfo, pos);
|
||||
|
||||
if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
|
||||
return invalidOperationMsg(pMsgBuf, msg5);
|
||||
}
|
||||
|
@ -6095,7 +6088,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
const char* msg19 = "invalid new tag name";
|
||||
const char* msg20 = "table is not super table";
|
||||
const char* msg21 = "only binary/nchar column length could be modified";
|
||||
const char* msg22 = "new column length should be bigger than old one";
|
||||
const char* msg23 = "only column length coulbe be modified";
|
||||
const char* msg24 = "invalid binary/nchar column length";
|
||||
|
||||
|
@ -6147,8 +6139,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
|
||||
if (!validateOneTags(pCmd, p)) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
int32_t ret = validateOneTag(pCmd, p);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
|
||||
|
@ -6325,8 +6318,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
TAOS_FIELD* p = taosArrayGet(pFieldList, 0);
|
||||
if (!validateOneColumn(pCmd, p)) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
int32_t ret = validateOneColumn(pCmd, p);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, p);
|
||||
|
@ -6389,7 +6383,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
if (pItem->bytes <= pColSchema->bytes) {
|
||||
return invalidOperationMsg(pMsg, msg22);
|
||||
return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH, pMsg, pItem->name, NULL);
|
||||
}
|
||||
|
||||
SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
|
||||
|
@ -6440,7 +6434,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
if (pItem->bytes <= pColSchema->bytes) {
|
||||
return invalidOperationMsg(pMsg, msg22);
|
||||
return tscErrorMsgWithCode(TSDB_CODE_TSC_INVALID_TAG_LENGTH, pMsg, pItem->name, NULL);
|
||||
}
|
||||
|
||||
SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta);
|
||||
|
@ -7230,7 +7224,6 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
|||
const char* msg1 = "interval not allowed in group by normal column";
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
|
||||
|
||||
SSchema* tagSchema = NULL;
|
||||
|
@ -7256,9 +7249,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
|||
s = &pSchema[colIndex];
|
||||
}
|
||||
}
|
||||
|
||||
size_t size = tscNumOfExprs(pQueryInfo);
|
||||
|
||||
|
||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
|
||||
int32_t f = TSDB_FUNC_TAG;
|
||||
|
@ -7266,8 +7257,10 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
|||
f = TSDB_FUNC_TAGPRJ;
|
||||
}
|
||||
|
||||
int32_t pos = tscGetFirstInvisibleFieldPos(pQueryInfo);
|
||||
|
||||
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
|
||||
SExprInfo* pExpr = tscExprInsert(pQueryInfo, pos, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);
|
||||
|
||||
memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
|
||||
tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));
|
||||
|
@ -7277,13 +7270,15 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
|||
|
||||
// NOTE: tag column does not add to source column list
|
||||
SColumnList ids = createColumnList(1, 0, pColIndex->colIndex);
|
||||
insertResultField(pQueryInfo, (int32_t)size, &ids, s->bytes, (int8_t)s->type, s->name, pExpr);
|
||||
insertResultField(pQueryInfo, pos, &ids, s->bytes, (int8_t)s->type, s->name, pExpr);
|
||||
} else {
|
||||
// if this query is "group by" normal column, time window query is not allowed
|
||||
if (isTimeWindowQuery(pQueryInfo)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
}
|
||||
|
||||
size_t size = tscNumOfExprs(pQueryInfo);
|
||||
|
||||
bool hasGroupColumn = false;
|
||||
for (int32_t j = 0; j < size; ++j) {
|
||||
SExprInfo* pExpr = tscExprGet(pQueryInfo, j);
|
||||
|
@ -8512,7 +8507,10 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
size_t len = strlen(name);
|
||||
|
||||
taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity);
|
||||
if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&pTableMeta, &tableMetaCapacity)) {
|
||||
// not found
|
||||
tfree(pTableMeta);
|
||||
}
|
||||
|
||||
if (pTableMeta && pTableMeta->id.uid > 0) {
|
||||
tscDebug("0x%"PRIx64" retrieve table meta %s from local buf", pSql->self, name);
|
||||
|
@ -8742,8 +8740,8 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) {
|
|||
|
||||
n += 1;
|
||||
}
|
||||
|
||||
info->numOfColumns = n;
|
||||
|
||||
|
||||
return meta;
|
||||
}
|
||||
|
@ -8756,6 +8754,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
|
|||
if (taosArrayGetSize(subInfo->pSubquery) >= 2) {
|
||||
return invalidOperationMsg(msgBuf, "not support union in subquery");
|
||||
}
|
||||
|
||||
SQueryInfo* pSub = calloc(1, sizeof(SQueryInfo));
|
||||
tscInitQueryInfo(pSub);
|
||||
|
||||
|
@ -8773,12 +8772,12 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
|
|||
return code;
|
||||
}
|
||||
|
||||
|
||||
// create dummy table meta info
|
||||
STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo));
|
||||
if (pTableMetaInfo1 == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
|
||||
pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
|
||||
|
||||
|
@ -8844,7 +8843,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
|
|||
* select server_status();
|
||||
* select server_version();
|
||||
* select client_version();
|
||||
* select current_database();
|
||||
* select database();
|
||||
*/
|
||||
if (pSqlNode->from == NULL) {
|
||||
assert(pSqlNode->fillType == NULL && pSqlNode->pGroupby == NULL && pSqlNode->pWhere == NULL &&
|
||||
|
@ -8862,7 +8861,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
|
|||
// check if there is 3 level select
|
||||
SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
|
||||
SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
|
||||
if (p->from->type == SQL_NODE_FROM_SUBQUERY){
|
||||
if (p->from->type == SQL_NODE_FROM_SUBQUERY) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
|
||||
}
|
||||
|
||||
|
@ -8955,6 +8954,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
|
|||
}
|
||||
}
|
||||
|
||||
// disable group result mixed up if interval/session window query exists.
|
||||
if (isTimeWindowQuery(pQueryInfo)) {
|
||||
size_t num = taosArrayGetSize(pQueryInfo->pUpstream);
|
||||
for(int32_t i = 0; i < num; ++i) {
|
||||
SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, i);
|
||||
pUp->multigroupResult = false;
|
||||
}
|
||||
}
|
||||
|
||||
// parse the having clause in the first place
|
||||
int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
|
||||
if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
|
||||
|
|
|
@@ -331,22 +331,42 @@ int tscSendMsgToServer(SSqlObj *pSql) {
      .handle  = NULL,
      .code    = 0
  };

  rpcSendRequest(pObj->pRpcObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid);
  return TSDB_CODE_SUCCESS;
}

static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
  SRpcMsg* rpcMsg = pSchedMsg->ahandle;
  SRpcEpSet* pEpSet = pSchedMsg->thandle;
// handle three situation
// 1. epset retry, only return last failure ep
// 2. no epset retry, like 'taos -h invalidFqdn', return invalidFqdn
// 3. other situation, no expected
void tscSetFqdnErrorMsg(SSqlObj* pSql, SRpcEpSet* pEpSet) {
  SSqlCmd* pCmd = &pSql->cmd;
  SSqlRes* pRes = &pSql->res;

  char* msgBuf = tscGetErrorMsgPayload(pCmd);

  if (pEpSet) {
    sprintf(msgBuf, "%s\"%s\"", tstrerror(pRes->code), pEpSet->fqdn[(pEpSet->inUse)%(pEpSet->numOfEps)]);
  } else if (pCmd->command >= TSDB_SQL_MGMT) {
    SRpcEpSet tEpset;

    SRpcCorEpSet *pCorEpSet = pSql->pTscObj->tscCorMgmtEpSet;
    taosCorBeginRead(&pCorEpSet->version);
    tEpset = pCorEpSet->epSet;
    taosCorEndRead(&pCorEpSet->version);

    sprintf(msgBuf, "%s\"%s\"", tstrerror(pRes->code), tEpset.fqdn[(tEpset.inUse)%(tEpset.numOfEps)]);
  } else {
    sprintf(msgBuf, "%s", tstrerror(pRes->code));
  }
}
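/*
 * A minimal sketch (editor's illustration, not taken from the commit) of the endpoint
 * pick used in tscSetFqdnErrorMsg() above: inUse is reduced modulo numOfEps so the
 * message always names a valid entry of the ep list, even after retries have advanced
 * inUse past the end. The field layout mirrors SRpcEpSet; the hostnames are invented.
 */
#include <stdio.h>

int main(void) {
  const char *fqdn[] = {"td-node-1", "td-node-2", "td-node-3"};  /* invented FQDNs */
  int numOfEps = 3;
  int inUse    = 4;                                   /* may exceed numOfEps after ep-set retries */
  printf("failed ep: %s\n", fqdn[inUse % numOfEps]);  /* prints "failed ep: td-node-2" */
  return 0;
}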
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
  TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
  SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, handle);
  if (pSql == NULL) {
    rpcFreeCont(rpcMsg->pCont);
    free(rpcMsg);
    free(pEpSet);
    return;
  }
@ -357,28 +377,23 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
pSql->rpcRid = -1;
|
||||
|
||||
if (pObj->signature != pObj) {
|
||||
tscDebug("0x%"PRIx64" DB connection is closed, cmd:%d pObj:%p signature:%p", pSql->self, pCmd->command, pObj, pObj->signature);
|
||||
|
||||
taosRemoveRef(tscObjRef, handle);
|
||||
taosReleaseRef(tscObjRef, handle);
|
||||
rpcFreeCont(rpcMsg->pCont);
|
||||
free(rpcMsg);
|
||||
free(pEpSet);
|
||||
return;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
|
||||
if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) {
|
||||
tscDebug("0x%"PRIx64" sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
|
||||
pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
|
||||
pSql->self, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
|
||||
|
||||
taosRemoveRef(tscObjRef, handle);
|
||||
taosReleaseRef(tscObjRef, handle);
|
||||
rpcFreeCont(rpcMsg->pCont);
|
||||
free(rpcMsg);
|
||||
free(pEpSet);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -407,9 +422,9 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
// 1. super table subquery
|
||||
// 2. nest queries are all not updated the tablemeta and retry parse the sql after cleanup local tablemeta/vgroup id buffer
|
||||
if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY |
|
||||
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
|
||||
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
|
||||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
|
||||
TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) &&
|
||||
!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) ||
|
||||
(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) {
|
||||
// do nothing in case of super table subquery
|
||||
} else {
|
||||
pSql->retry += 1;
|
||||
|
@ -432,8 +447,6 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
taosReleaseRef(tscObjRef, handle);
|
||||
rpcFreeCont(rpcMsg->pCont);
|
||||
free(rpcMsg);
|
||||
free(pEpSet);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -485,7 +498,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
|
||||
pRes->numOfRows += pMsg->affectedRows;
|
||||
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s inserted rows:%d rspLen:%d", pSql->self, sqlCmd[pCmd->command],
|
||||
tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen);
|
||||
tstrerror(pRes->code), pMsg->affectedRows, pRes->rspLen);
|
||||
} else {
|
||||
tscDebug("0x%"PRIx64" SQL cmd:%s, code:%s rspLen:%d", pSql->self, sqlCmd[pCmd->command], tstrerror(pRes->code), pRes->rspLen);
|
||||
}
|
||||
|
@ -500,28 +513,13 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
if (rpcMsg->code != TSDB_CODE_SUCCESS) {
|
||||
pRes->code = rpcMsg->code;
|
||||
}
|
||||
|
||||
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
|
||||
if (pRes->code == TSDB_CODE_RPC_FQDN_ERROR) {
|
||||
tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64);
|
||||
// handle three situation
|
||||
// 1. epset retry, only return last failure ep
|
||||
// 2. no epset retry, like 'taos -h invalidFqdn', return invalidFqdn
|
||||
// 3. other situation, no expected
|
||||
if (pEpSet) {
|
||||
sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code),pEpSet->fqdn[(pEpSet->inUse)%(pEpSet->numOfEps)]);
|
||||
} else if (pCmd->command >= TSDB_SQL_MGMT) {
|
||||
SRpcEpSet tEpset;
|
||||
|
||||
SRpcCorEpSet *pCorEpSet = pSql->pTscObj->tscCorMgmtEpSet;
|
||||
taosCorBeginRead(&pCorEpSet->version);
|
||||
tEpset = pCorEpSet->epSet;
|
||||
taosCorEndRead(&pCorEpSet->version);
|
||||
|
||||
sprintf(tscGetErrorMsgPayload(pCmd), "%s\"%s\"", tstrerror(pRes->code),tEpset.fqdn[(tEpset.inUse)%(tEpset.numOfEps)]);
|
||||
} else {
|
||||
sprintf(tscGetErrorMsgPayload(pCmd), "%s", tstrerror(pRes->code));
|
||||
}
|
||||
if (rpcMsg->code == TSDB_CODE_RPC_FQDN_ERROR) {
|
||||
tscAllocPayload(pCmd, TSDB_FQDN_LEN + 64);
|
||||
tscSetFqdnErrorMsg(pSql, pEpSet);
|
||||
}
|
||||
|
||||
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);
|
||||
}
|
||||
|
||||
|
@ -532,35 +530,6 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) {
|
|||
|
||||
taosReleaseRef(tscObjRef, handle);
|
||||
rpcFreeCont(rpcMsg->pCont);
|
||||
free(rpcMsg);
|
||||
free(pEpSet);
|
||||
}
|
||||
|
||||
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
|
||||
int64_t st = taosGetTimestampUs();
|
||||
SSchedMsg schedMsg = {0};
|
||||
|
||||
schedMsg.fp = doProcessMsgFromServer;
|
||||
|
||||
SRpcMsg* rpcMsgCopy = calloc(1, sizeof(SRpcMsg));
|
||||
memcpy(rpcMsgCopy, rpcMsg, sizeof(struct SRpcMsg));
|
||||
schedMsg.ahandle = (void*)rpcMsgCopy;
|
||||
|
||||
SRpcEpSet* pEpSetCopy = NULL;
|
||||
if (pEpSet != NULL) {
|
||||
pEpSetCopy = calloc(1, sizeof(SRpcEpSet));
|
||||
memcpy(pEpSetCopy, pEpSet, sizeof(SRpcEpSet));
|
||||
}
|
||||
|
||||
schedMsg.thandle = (void*)pEpSetCopy;
|
||||
schedMsg.msg = NULL;
|
||||
|
||||
taosScheduleTask(tscQhandle, &schedMsg);
|
||||
|
||||
int64_t et = taosGetTimestampUs();
|
||||
if (et - st > 100) {
|
||||
tscDebug("add message to task queue, elapsed time:%"PRId64, et - st);
|
||||
}
|
||||
}
|
||||
|
||||
int doBuildAndSendMsg(SSqlObj *pSql) {
|
||||
|
@ -733,8 +702,13 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
|
|||
}
|
||||
}
|
||||
|
||||
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + exprSize + tsBufSize +
|
||||
tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
|
||||
SCond* pCond = &pQueryInfo->tagCond.tbnameCond;
|
||||
if (pCond->len > 0) {
|
||||
srcColListSize += pCond->len;
|
||||
}
|
||||
|
||||
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize +
|
||||
exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
|
||||
}
|
||||
|
||||
static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg,
|
||||
|
@ -1429,7 +1403,6 @@ int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
|
|||
}
|
||||
|
||||
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
||||
STscObj *pObj = pSql->pTscObj;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW;
|
||||
pCmd->payloadLen = sizeof(SShowMsg) + 100;
|
||||
|
@ -1452,9 +1425,9 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
}
|
||||
|
||||
if (tNameIsEmpty(&pTableMetaInfo->name)) {
|
||||
pthread_mutex_lock(&pObj->mutex);
|
||||
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
|
||||
pthread_mutex_unlock(&pObj->mutex);
|
||||
char *p = cloneCurrentDBName(pSql);
|
||||
tstrncpy(pShowMsg->db, p, sizeof(pShowMsg->db));
|
||||
tfree(p);
|
||||
} else {
|
||||
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
|
||||
}
|
||||
|
@ -2950,11 +2923,15 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
|
|||
// just make runtime happy
|
||||
if (pTableMetaInfo->tableMetaCapacity != 0 && pTableMetaInfo->pTableMeta != NULL) {
|
||||
memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity);
|
||||
}
|
||||
taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity);
|
||||
}
|
||||
|
||||
if (NULL == taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity)) {
|
||||
tfree(pTableMetaInfo->pTableMeta);
|
||||
}
|
||||
|
||||
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
|
||||
STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf);
|
||||
|
||||
if (pMeta && pMeta->id.uid > 0) {
|
||||
// in case of child table, here only get the
|
||||
if (pMeta->tableType == TSDB_CHILD_TABLE) {
|
||||
|
@ -2964,6 +2941,8 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
|
|||
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
|
||||
}
|
||||
}
|
||||
|
||||
tscDebug("0x%"PRIx64 " %s retrieve tableMeta from cache, numOfCols:%d, numOfTags:%d", pSql->self, name, pMeta->tableInfo.numOfColumns, pMeta->tableInfo.numOfTags);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@@ -892,7 +892,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
    return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
  }

  pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
  char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
  if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr);
  pSql->sqlstr = sqlstr;
  if (pSql->sqlstr == NULL) {
    tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
    tfree(pSql);
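/*
 * Editor's sketch of the realloc idiom this commit applies here and in several other
 * spots (tdAddColToKVRow, tdAddColToSchema, DupAndConvert, ...): keep the realloc()
 * result in a temporary so the original block can still be freed when realloc fails,
 * instead of overwriting the only pointer to it. grow_buffer() is a hypothetical
 * helper written for illustration only.
 */
#include <stdlib.h>

static char *grow_buffer(char *buf, size_t newSize) {
  char *tmp = realloc(buf, newSize);  /* on failure, buf is left untouched */
  if (tmp == NULL) {
    free(buf);                        /* release the old block so it does not leak */
    return NULL;                      /* caller treats NULL as out-of-memory */
  }
  return tmp;                         /* may or may not equal the old pointer */
}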
@ -15,8 +15,9 @@
|
|||
#define _GNU_SOURCE
|
||||
|
||||
#include "os.h"
|
||||
|
||||
#include "texpr.h"
|
||||
|
||||
#include "tsched.h"
|
||||
#include "qTsbuf.h"
|
||||
#include "tcompare.h"
|
||||
#include "tscLog.h"
|
||||
|
@ -2038,17 +2039,14 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
|||
tscAsyncResultOnError(pSql);
|
||||
}
|
||||
|
||||
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
|
||||
void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
|
||||
assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0);
|
||||
|
||||
for(int32_t i = 0; i < numOfSubs; ++i) {
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
assert(pSub != NULL);
|
||||
|
||||
SRetrieveSupport* pSupport = pSub->param;
|
||||
|
||||
tfree(pSupport->localBuffer);
|
||||
tfree(pSupport);
|
||||
|
||||
tscFreeRetrieveSup(pSub);
|
||||
|
||||
taos_free_result(pSub);
|
||||
}
|
||||
|
@ -2406,6 +2404,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) {
|
|||
} else {
|
||||
SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex};
|
||||
tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss);
|
||||
int32_t ti = tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid);
|
||||
assert(ti >= 0);
|
||||
SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti);
|
||||
tscColumnCopy(x, pCol);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2442,7 +2444,11 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) {
|
|||
SSqlObj* pSql = pSchedMsg->ahandle;
|
||||
SPair* p = pSchedMsg->msg;
|
||||
|
||||
for(int32_t i = p->first; i < p->second; ++i) {
|
||||
for (int32_t i = p->first; i < p->second; ++i) {
|
||||
if (i >= pSql->subState.numOfSub) {
|
||||
tfree(p);
|
||||
return;
|
||||
}
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
SRetrieveSupport* pSupport = pSub->param;
|
||||
|
||||
|
@@ -2582,7 +2588,12 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
  int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK;
  assert(numOfTasks >= 1);

  int32_t num = (pState->numOfSub/numOfTasks) + 1;
  int32_t num;
  if (pState->numOfSub / numOfTasks == MAX_REQUEST_PER_TASK) {
    num = MAX_REQUEST_PER_TASK;
  } else {
    num = pState->numOfSub / numOfTasks + 1;
  }
  tscDebug("0x%"PRIx64 " query will be sent by %d threads", pSql->self, numOfTasks);
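/*
 * Worked example of the batch-size change above (editor's sketch; MAX_REQUEST_PER_TASK
 * is defined elsewhere in the client code and is simply assumed to be 8 here). With
 * 16 subqueries the old "+1" formula made each task cover 9 of them, so the last range
 * ran past numOfSub; the new branch detects the evenly divisible case and uses the cap
 * itself, which is also why doSendQueryReqs() now bounds-checks the index.
 */
#include <assert.h>
#include <stdio.h>

int main(void) {
  const int kMaxPerTask = 8;   /* assumed stand-in for MAX_REQUEST_PER_TASK */
  int numOfSub   = 16;
  int numOfTasks = (numOfSub + kMaxPerTask - 1) / kMaxPerTask;            /* 2 */
  int oldNum     = numOfSub / numOfTasks + 1;                             /* 9 */
  int newNum     = (numOfSub / numOfTasks == kMaxPerTask)
                       ? kMaxPerTask
                       : numOfSub / numOfTasks + 1;                       /* 8 */
  printf("tasks=%d oldBatch=%d newBatch=%d\n", numOfTasks, oldNum, newNum);
  assert(newNum * numOfTasks == numOfSub);  /* ranges now cover exactly numOfSub */
  return 0;
}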
  for(int32_t j = 0; j < numOfTasks; ++j) {
@ -2607,7 +2618,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tscFreeRetrieveSup(SSqlObj *pSql) {
|
||||
void tscFreeRetrieveSup(SSqlObj *pSql) {
|
||||
SRetrieveSupport *trsupport = pSql->param;
|
||||
|
||||
void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0);
|
||||
|
@ -2738,7 +2749,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
}
|
||||
} else { // reach the maximum retry count, abort
|
||||
atomic_val_compare_exchange_32(&pParentSql->res.code, TSDB_CODE_SUCCESS, numOfRows);
|
||||
tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pParentSql->self, pSql->self,
|
||||
tscError("0x%"PRIx64" sub:0x%"PRIx64" retrieve failed, code:%s, orderOfSub:%d FAILED. no more retry, set global code:%s", pParentSql->self, pSql->self,
|
||||
tstrerror(numOfRows), subqueryIndex, tstrerror(pParentSql->res.code));
|
||||
}
|
||||
}
|
||||
|
@ -2765,27 +2776,43 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
|
|||
if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
|
||||
|
||||
int32_t code = pParentSql->res.code;
|
||||
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) {
|
||||
// remove the cached tableMeta and vgroup id list, and then parse the sql again
|
||||
tscResetSqlCmd( &pParentSql->cmd, true, pParentSql->self);
|
||||
SSqlObj *userSql = NULL;
|
||||
if (pParentSql->param) {
|
||||
userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql;
|
||||
}
|
||||
|
||||
pParentSql->retry++;
|
||||
pParentSql->res.code = TSDB_CODE_SUCCESS;
|
||||
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
|
||||
tstrerror(code), pParentSql->retry);
|
||||
if (userSql == NULL) {
|
||||
userSql = pParentSql;
|
||||
}
|
||||
|
||||
code = tsParseSql(pParentSql, true);
|
||||
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
|
||||
if (userSql != pParentSql) {
|
||||
tscFreeRetrieveSup(pParentSql);
|
||||
}
|
||||
|
||||
tscFreeSubobj(userSql);
|
||||
tfree(userSql->pSubs);
|
||||
|
||||
userSql->res.code = TSDB_CODE_SUCCESS;
|
||||
userSql->retry++;
|
||||
|
||||
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self,
|
||||
tstrerror(code), userSql->retry);
|
||||
|
||||
tscResetSqlCmd(&userSql->cmd, true, userSql->self);
|
||||
code = tsParseSql(userSql, true);
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pParentSql->res.code = code;
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
userSql->res.code = code;
|
||||
tscAsyncResultOnError(userSql);
|
||||
return;
|
||||
}
|
||||
|
||||
executeQuery(pParentSql, pQueryInfo);
|
||||
pQueryInfo = tscGetQueryInfo(&userSql->cmd);
|
||||
executeQuery(userSql, pQueryInfo);
|
||||
} else {
|
||||
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
|
||||
}
|
||||
|
@ -2855,7 +2882,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
|
|||
pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows);
|
||||
|
||||
SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd);
|
||||
tscClearInterpInfo(pPQueryInfo);
|
||||
|
||||
code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self);
|
||||
pParentSql->res.code = code;
|
||||
|
@ -2970,7 +2996,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
|
|||
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
|
||||
pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
|
||||
|
||||
if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
|
||||
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
|
||||
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
|
||||
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
|
||||
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "tsclient.h"
|
||||
#include "ttimer.h"
|
||||
#include "ttokendef.h"
|
||||
#include "httpInt.h"
|
||||
|
||||
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
|
||||
|
||||
|
@ -403,6 +404,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
|
|||
return false;
|
||||
}
|
||||
|
||||
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
  size_t numOfExprs = tscNumOfExprs(pQueryInfo);

  for (int32_t i = 0; i < numOfExprs; ++i) {
    SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
    if (pExpr == NULL) {
      continue;
    }

    if (pExpr->base.functionId == TSDB_FUNC_TS) {
      continue;
    }

    if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
      return i;
    }
  }

  return -1;
}

bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) {
|
||||
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
|
||||
|
||||
|
@ -659,8 +681,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo
|
|||
|
||||
} else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
|
||||
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
|
||||
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
|
||||
|
||||
char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
|
||||
if(buffer == NULL)
|
||||
return ;
|
||||
pRes->buffer[i] = buffer;
|
||||
// string terminated char for binary data
|
||||
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
|
||||
|
||||
|
@ -1236,6 +1260,7 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
|
|||
}
|
||||
|
||||
SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilters);
|
||||
|
||||
pOutput->precision = pSqlObjList[0]->res.precision;
|
||||
|
||||
SSchema* schema = NULL;
|
||||
|
@ -2096,6 +2121,22 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) {
|
|||
return f;
|
||||
}
|
||||
|
||||
int32_t tscGetFirstInvisibleFieldPos(SQueryInfo* pQueryInfo) {
  if (pQueryInfo->fieldsInfo.numOfOutput <= 0 || pQueryInfo->fieldsInfo.internalField == NULL) {
    return 0;
  }

  for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
    SInternalField* pField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
    if (!pField->visible) {
      return i;
    }
  }

  return pQueryInfo->fieldsInfo.numOfOutput;
}

SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) {
|
||||
assert(pFieldInfo != NULL);
|
||||
pFieldInfo->numOfOutput++;
|
||||
|
@ -2419,6 +2460,19 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) {
|
|||
return taosArrayGetSize(pQueryInfo->exprList);
|
||||
}
|
||||
|
||||
int32_t tscExprTopBottomIndex(SQueryInfo* pQueryInfo){
  size_t numOfExprs = tscNumOfExprs(pQueryInfo);
  for(int32_t i = 0; i < numOfExprs; ++i) {
    SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
    if (pExpr == NULL)
      continue;
    if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) {
      return i;
    }
  }
  return -1;
}

// todo REFACTOR
|
||||
void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
|
||||
assert (pExpr != NULL || argument != NULL || bytes != 0);
|
||||
|
@ -3128,6 +3182,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
|
|||
pQueryInfo->slimit.offset = 0;
|
||||
pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES);
|
||||
pQueryInfo->window = TSWINDOW_INITIALIZER;
|
||||
pQueryInfo->multigroupResult = true;
|
||||
}
|
||||
|
||||
int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
|
||||
|
@ -3139,7 +3194,6 @@ int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
|
|||
}
|
||||
|
||||
tscInitQueryInfo(pQueryInfo);
|
||||
|
||||
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
|
||||
|
||||
if (pCmd->pQueryInfo == NULL) {
|
||||
|
@ -3188,6 +3242,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
|
|||
|
||||
taosArrayDestroy(pQueryInfo->pUpstream);
|
||||
pQueryInfo->pUpstream = NULL;
|
||||
pQueryInfo->bufLen = 0;
|
||||
}
|
||||
|
||||
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
|
||||
|
@ -3222,6 +3277,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
|
|||
pQueryInfo->window = pSrc->window;
|
||||
pQueryInfo->sessionWindow = pSrc->sessionWindow;
|
||||
pQueryInfo->pTableMetaInfo = NULL;
|
||||
pQueryInfo->multigroupResult = pSrc->multigroupResult;
|
||||
|
||||
pQueryInfo->bufLen = pSrc->bufLen;
|
||||
pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
|
||||
|
@ -3607,24 +3663,25 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
|
|||
pnCmd->active = pNewQueryInfo;
|
||||
|
||||
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
|
||||
pNewQueryInfo->type = pQueryInfo->type;
|
||||
pNewQueryInfo->window = pQueryInfo->window;
|
||||
pNewQueryInfo->limit = pQueryInfo->limit;
|
||||
pNewQueryInfo->slimit = pQueryInfo->slimit;
|
||||
pNewQueryInfo->order = pQueryInfo->order;
|
||||
pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit;
|
||||
pNewQueryInfo->tsBuf = NULL;
|
||||
pNewQueryInfo->fillType = pQueryInfo->fillType;
|
||||
pNewQueryInfo->fillVal = NULL;
|
||||
pNewQueryInfo->type = pQueryInfo->type;
|
||||
pNewQueryInfo->window = pQueryInfo->window;
|
||||
pNewQueryInfo->limit = pQueryInfo->limit;
|
||||
pNewQueryInfo->slimit = pQueryInfo->slimit;
|
||||
pNewQueryInfo->order = pQueryInfo->order;
|
||||
pNewQueryInfo->tsBuf = NULL;
|
||||
pNewQueryInfo->fillType = pQueryInfo->fillType;
|
||||
pNewQueryInfo->fillVal = NULL;
|
||||
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
|
||||
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
|
||||
pNewQueryInfo->numOfFillVal = 0;
|
||||
pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit;
|
||||
pNewQueryInfo->prjOffset = pQueryInfo->prjOffset;
|
||||
pNewQueryInfo->numOfTables = 0;
|
||||
pNewQueryInfo->numOfTables = 0;
|
||||
pNewQueryInfo->pTableMetaInfo = NULL;
|
||||
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
|
||||
pNewQueryInfo->distinct = pQueryInfo->distinct;
|
||||
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
|
||||
pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit;
|
||||
pNewQueryInfo->distinct = pQueryInfo->distinct;
|
||||
pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult;
|
||||
|
||||
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
|
||||
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
|
||||
if (pNewQueryInfo->buf == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
|
@ -3840,8 +3897,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
|
|||
int32_t index = ps->subqueryIndex;
|
||||
bool ret = subAndCheckDone(pSql, pParentSql, index);
|
||||
|
||||
tfree(ps);
|
||||
pSql->param = NULL;
|
||||
tscFreeRetrieveSup(pSql);
|
||||
|
||||
if (!ret) {
|
||||
tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index);
|
||||
|
@ -3850,7 +3906,15 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
|
|||
|
||||
// todo refactor
|
||||
tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self);
|
||||
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
|
||||
if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
|
||||
pParentSql->res.code = code;
|
||||
|
||||
tscAsyncResultOnError(pParentSql);
|
||||
return;
|
||||
}
|
||||
|
||||
tscFreeSubobj(pParentSql);
|
||||
tfree(pParentSql->pSubs);
|
||||
|
||||
pParentSql->res.code = TSDB_CODE_SUCCESS;
|
||||
pParentSql->retry++;
|
||||
|
@ -3858,6 +3922,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
|
|||
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
|
||||
tstrerror(code), pParentSql->retry);
|
||||
|
||||
|
||||
tscResetSqlCmd(&pParentSql->cmd, true, pParentSql->self);
|
||||
|
||||
code = tsParseSql(pParentSql, true);
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
return;
|
||||
|
@ -3892,9 +3959,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
|
|||
}
|
||||
|
||||
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
|
||||
assert(pSql->subState.numOfSub == 0);
|
||||
pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream);
|
||||
|
||||
assert(pSql->pSubs == NULL);
|
||||
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
|
||||
assert(pSql->subState.states == NULL);
|
||||
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t));
|
||||
code = pthread_mutex_init(&pSql->subState.mutex, NULL);
|
||||
|
||||
|
@ -3919,7 +3988,11 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
|
|||
pNew->signature = pNew;
|
||||
pNew->sqlstr = strdup(pSql->sqlstr);
|
||||
pNew->fp = tscSubqueryCompleteCallback;
|
||||
pNew->fetchFp = tscSubqueryCompleteCallback;
|
||||
pNew->maxRetry = pSql->maxRetry;
|
||||
|
||||
pNew->cmd.resColumnId = TSDB_RES_COL_ID;
|
||||
|
||||
tsem_init(&pNew->rspSem, 0, 0);
|
||||
|
||||
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
|
||||
|
@ -4096,6 +4169,31 @@ int32_t tscInvalidOperationMsg(char* msg, const char* additionalInfo, const char
|
|||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
int32_t tscErrorMsgWithCode(int32_t code, char* dstBuffer, const char* errMsg, const char* sql) {
  const char* msgFormat1 = "%s:%s";
  const char* msgFormat2 = "%s:\'%s\' (%s)";
  const char* msgFormat3 = "%s:\'%s\'";

  const int32_t BACKWARD_CHAR_STEP = 0;

  if (sql == NULL) {
    assert(errMsg != NULL);
    sprintf(dstBuffer, msgFormat1, tstrerror(code), errMsg);
    return code;
  }

  char buf[64] = {0};  // only extract part of sql string
  strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);

  if (errMsg != NULL) {
    sprintf(dstBuffer, msgFormat2, tstrerror(code), buf, errMsg);
  } else {
    sprintf(dstBuffer, msgFormat3, tstrerror(code), buf);  // no additional information for invalid sql error
  }

  return code;
}

bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
|
||||
assert(pQueryInfo != NULL && pQueryInfo->clauseLimit != 0);
|
||||
return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
|
||||
|
@ -4452,21 +4550,27 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
|
|||
assert(*ppChild != NULL);
|
||||
STableMeta* p = *ppSTable;
|
||||
STableMeta* pChild = *ppChild;
|
||||
size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
|
||||
|
||||
size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care
|
||||
if (p != NULL && sz != 0) {
|
||||
memset((char *)p, 0, sz);
|
||||
}
|
||||
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
|
||||
*ppSTable = p;
|
||||
|
||||
if (NULL == taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz)) {
|
||||
tfree(p);
|
||||
} else {
|
||||
*ppSTable = p;
|
||||
}
|
||||
|
||||
// tableMeta exists, build child table meta according to the super table meta
|
||||
// the uid need to be checked in addition to the general name of the super table.
|
||||
if (p && p->id.uid > 0 && pChild->suid == p->id.uid) {
|
||||
|
||||
int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema);
|
||||
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
|
||||
if (*tableMetaCapacity < tableMetaSize) {
|
||||
pChild = realloc(pChild, tableMetaSize);
|
||||
STableMeta* pChild1 = realloc(pChild, tableMetaSize);
|
||||
if(pChild1 == NULL) return -1;
|
||||
pChild = pChild1;
|
||||
*tableMetaCapacity = (size_t)tableMetaSize;
|
||||
}
|
||||
|
||||
|
@ -4736,6 +4840,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
|
|||
pQueryAttr->distinct = pQueryInfo->distinct;
|
||||
pQueryAttr->sw = pQueryInfo->sessionWindow;
|
||||
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
|
||||
pQueryAttr->multigroupResult = pQueryInfo->multigroupResult;
|
||||
|
||||
pQueryAttr->numOfCols = numOfCols;
|
||||
pQueryAttr->numOfOutput = numOfOutput;
|
||||
|
@ -5008,3 +5113,31 @@ void tscRemoveCachedTableMeta(STableMetaInfo* pTableMetaInfo, uint64_t id) {
|
|||
taosHashRemove(tscTableMetaMap, fname, len);
|
||||
tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap));
|
||||
}
|
||||
|
||||
char* cloneCurrentDBName(SSqlObj* pSql) {
  char *p = NULL;
  HttpContext *pCtx = NULL;

  pthread_mutex_lock(&pSql->pTscObj->mutex);
  STscObj *pTscObj = pSql->pTscObj;
  switch (pTscObj->from) {
    case TAOS_REQ_FROM_HTTP:
      pCtx = pSql->param;
      if (pCtx && pCtx->db[0] != '\0') {
        char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0};
        int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db);
        assert(len <= sizeof(db));

        p = strdup(db);
      }
      break;
    default:
      break;
  }
  if (p == NULL) {
    p = strdup(pSql->pTscObj->db);
  }
  pthread_mutex_unlock(&pSql->pTscObj->mutex);

  return p;
}
@@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
  if (pBuilder->nCols >= pBuilder->tCols) {
    pBuilder->tCols *= 2;
    pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
    if (pBuilder->pColIdx == NULL) return -1;
    SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);
    if (pColIdx == NULL) return -1;
    pBuilder->pColIdx = pColIdx;
  }

  pBuilder->pColIdx[pBuilder->nCols].colId = colId;
@@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId,
    while (tlen > pBuilder->alloc - pBuilder->size) {
      pBuilder->alloc *= 2;
    }
    pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc);
    if (pBuilder->buf == NULL) return -1;
    void* buf = realloc(pBuilder->buf, pBuilder->alloc);
    if (buf == NULL) return -1;
    pBuilder->buf = buf;
  }

  memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen);
@ -131,6 +131,7 @@ extern int32_t tsHttpMaxThreads;
|
|||
extern int8_t tsHttpEnableCompress;
|
||||
extern int8_t tsHttpEnableRecordSql;
|
||||
extern int8_t tsTelegrafUseFieldNum;
|
||||
extern int8_t tsHttpDbNameMandatory;
|
||||
|
||||
// mqtt
|
||||
extern int8_t tsEnableMqttModule;
|
||||
|
@ -164,6 +165,7 @@ extern char tsDataDir[];
|
|||
extern char tsLogDir[];
|
||||
extern char tsScriptDir[];
|
||||
extern int64_t tsTickPerDay[3];
|
||||
extern int32_t tsTopicBianryLen;
|
||||
|
||||
// system info
|
||||
extern char tsOsName[];
|
||||
|
|
|
@@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1

  if (pBuilder->nCols >= pBuilder->tCols) {
    pBuilder->tCols *= 2;
    pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
    if (pBuilder->columns == NULL) return -1;
    STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols);
    if (columns == NULL) return -1;
    pBuilder->columns = columns;
  }

  STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]);
@ -84,8 +84,9 @@ int32_t tsCompressColData = -1;
|
|||
|
||||
// client
|
||||
int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
|
||||
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN;
|
||||
int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_MAX_LEN;
|
||||
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
|
||||
int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN;
|
||||
|
||||
int8_t tsTscEnableRecordSql = 0;
|
||||
|
||||
// the maximum number of results for projection query on super table that are returned from
|
||||
|
@ -153,7 +154,6 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
|
|||
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
|
||||
|
||||
// tsdb config
|
||||
|
||||
// For backward compatibility
|
||||
bool tsdbForceKeepFile = false;
|
||||
|
||||
|
@ -177,6 +177,7 @@ int32_t tsHttpMaxThreads = 2;
|
|||
int8_t tsHttpEnableCompress = 1;
|
||||
int8_t tsHttpEnableRecordSql = 0;
|
||||
int8_t tsTelegrafUseFieldNum = 0;
|
||||
int8_t tsHttpDbNameMandatory = 0;
|
||||
|
||||
// mqtt
|
||||
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
|
||||
|
@ -211,6 +212,7 @@ char tsScriptDir[PATH_MAX] = {0};
|
|||
char tsTempDir[PATH_MAX] = "/tmp/";
|
||||
|
||||
int32_t tsDiskCfgNum = 0;
|
||||
int32_t tsTopicBianryLen = 16000;
|
||||
|
||||
#ifndef _STORAGE
|
||||
SDiskCfg tsDiskCfg[1];
|
||||
|
@ -571,7 +573,6 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
|
||||
cfg.option = "numOfMnodes";
|
||||
cfg.ptr = &tsNumOfMnodes;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
|
@ -1239,6 +1240,16 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "topicBianryLen";
|
||||
cfg.ptr = &tsTopicBianryLen;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 16;
|
||||
cfg.maxValue = 16000;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "httpEnableRecordSql";
|
||||
cfg.ptr = &tsHttpEnableRecordSql;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
|
@ -1279,6 +1290,16 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "httpDbNameMandatory";
|
||||
cfg.ptr = &tsHttpDbNameMandatory;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT8;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = 1;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
// debug flag
|
||||
cfg.option = "numOfLogLines";
|
||||
cfg.ptr = &tsNumOfLogLines;
|
||||
|
|
|
@ -117,7 +117,6 @@
|
|||
<exclude>**/DatetimeBefore1970Test.java</exclude>
|
||||
<exclude>**/FailOverTest.java</exclude>
|
||||
<exclude>**/InvalidResultSetPointerTest.java</exclude>
|
||||
<exclude>**/RestfulConnectionTest.java</exclude>
|
||||
<exclude>**/TSDBJNIConnectorTest.java</exclude>
|
||||
<exclude>**/TaosInfoMonitorTest.java</exclude>
|
||||
<exclude>**/UnsignedNumberJniTest.java</exclude>
|
||||
|
|
|
@ -40,13 +40,13 @@ public class TSDBError {
|
|||
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database, please see taoslog for more details");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "JNI connection is NULL");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL, "JNI result set is NULL");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_NUM_OF_FIELDS_0, "invalid num of fields");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_SQL_NULL, "empty sql string");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_FETCH_END, "fetch to the end of resultSet");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed");
|
||||
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY, "JNI alloc memory failed, please see taoslog for more details");
|
||||
}
|
||||
|
||||
public static SQLException createSQLException(int errorCode) {
|
||||
|
|
|
@ -278,25 +278,20 @@ public class TSDBJNIConnector {
|
|||
private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);
|
||||
|
||||
public long prepareStmt(String sql) throws SQLException {
|
||||
long stmt;
|
||||
try {
|
||||
stmt = prepareStmtImp(sql.getBytes(), this.taos);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
|
||||
}
|
||||
long stmt = prepareStmtImp(sql.getBytes(), this.taos);
|
||||
|
||||
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, "connection already closed");
|
||||
}
|
||||
|
||||
if (stmt == TSDBConstants.JNI_SQL_NULL) {
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
|
||||
}
|
||||
|
||||
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
|
||||
}
|
||||
if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR);
|
||||
}
|
||||
|
||||
return stmt;
|
||||
}
|
||||
|
@ -313,8 +308,7 @@ public class TSDBJNIConnector {
|
|||
private native int setBindTableNameImp(long stmt, String name, long conn);
|
||||
|
||||
public void setBindTableNameAndTags(long stmt, String tableName, int numOfTags, ByteBuffer tags, ByteBuffer typeList, ByteBuffer lengthList, ByteBuffer nullList) throws SQLException {
|
||||
int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(),
|
||||
nullList.array(), this.taos);
|
||||
int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(), nullList.array(), this.taos);
|
||||
if (code != TSDBConstants.JNI_SUCCESS) {
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind table name and corresponding tags");
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ public class RestfulConnection extends AbstractConnection {
|
|||
private final String url;
|
||||
private final String database;
|
||||
private final String token;
|
||||
/******************************************************/
|
||||
|
||||
private boolean isClosed;
|
||||
private final DatabaseMetaData metadata;
|
||||
|
||||
|
|
|
@@ -88,17 +88,24 @@ public class RestfulStatement extends AbstractStatement {
    }

    private String getUrl() throws SQLException {
        String dbname = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_DBNAME);
        if (dbname == null || dbname.trim().isEmpty()) {
            dbname = "";
        } else {
            dbname = "/" + dbname.toLowerCase();
        }
        TimestampFormat timestampFormat = TimestampFormat.valueOf(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).trim().toUpperCase());
        String url;

        switch (timestampFormat) {
            case TIMESTAMP:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt" + dbname;
                break;
            case UTC:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc" + dbname;
                break;
            default:
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
                url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql" + dbname;
        }
        return url;
    }
@ -0,0 +1,101 @@
|
|||
package com.taosdata.jdbc.cases;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
|
||||
public class MultiConnectionWithDifferentDbTest {
|
||||
|
||||
private static String host = "127.0.0.1";
|
||||
private static String db1 = "db1";
|
||||
private static String db2 = "db2";
|
||||
|
||||
private long ts;
|
||||
|
||||
@Test
|
||||
public void test() {
|
||||
List<Thread> threads = IntStream.range(1, 3).mapToObj(i -> new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
for (int j = 0; j < 10; j++) {
|
||||
queryDb();
|
||||
try {
|
||||
TimeUnit.SECONDS.sleep(1);
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void queryDb() {
|
||||
String url = "jdbc:TAOS-RS://" + host + ":6041/db" + i + "?user=root&password=taosdata";
|
||||
try (Connection connection = DriverManager.getConnection(url)) {
|
||||
Statement stmt = connection.createStatement();
|
||||
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
assertNotNull(rs);
|
||||
rs.next();
|
||||
long actual = rs.getTimestamp("ts").getTime();
|
||||
assertEquals(ts, actual);
|
||||
|
||||
int f1 = rs.getInt("f1");
|
||||
assertEquals(i, f1);
|
||||
|
||||
String loc = i == 1 ? "beijing" : "shanghai";
|
||||
String loc_actual = rs.getString("loc");
|
||||
assertEquals(loc, loc_actual);
|
||||
|
||||
stmt.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}, "thread-" + i)).collect(Collectors.toList());
|
||||
|
||||
threads.forEach(Thread::start);
|
||||
|
||||
for (Thread t : threads) {
|
||||
try {
|
||||
t.join();
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
ts = System.currentTimeMillis();
|
||||
|
||||
try {
|
||||
Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
|
||||
|
||||
Statement stmt = conn.createStatement();
|
||||
stmt.execute("drop database if exists " + db1);
|
||||
stmt.execute("create database if not exists " + db1);
|
||||
stmt.execute("use " + db1);
|
||||
stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
|
||||
stmt.execute("insert into t1 using weather tags('beijing') values(" + ts + ", 1)");
|
||||
|
||||
stmt.execute("drop database if exists " + db2);
|
||||
stmt.execute("create database if not exists " + db2);
|
||||
stmt.execute("use " + db2);
|
||||
stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
|
||||
stmt.execute("insert into t1 using weather tags('shanghai') values(" + ts + ", 2)");
|
||||
|
||||
conn.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
package com.taosdata.jdbc.cases;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.*;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
public class UseNowInsertTimestampTest {
|
||||
String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
|
||||
|
||||
@Test
|
||||
public void millisec() {
|
||||
try (Connection conn = DriverManager.getConnection(url)) {
|
||||
Statement stmt = conn.createStatement();
|
||||
stmt.execute("drop database if exists test");
|
||||
stmt.execute("create database if not exists test precision 'ms'");
|
||||
stmt.execute("use test");
|
||||
stmt.execute("create table weather(ts timestamp, f1 int)");
|
||||
stmt.execute("insert into weather values(now, 1)");
|
||||
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
rs.next();
|
||||
Timestamp ts = rs.getTimestamp("ts");
|
||||
assertEquals(13, Long.toString(ts.getTime()).length());
|
||||
|
||||
int nanos = ts.getNanos();
|
||||
assertEquals(0, nanos % 1000_000);
|
||||
|
||||
stmt.execute("drop database if exists test");
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void microsec() {
|
||||
try (Connection conn = DriverManager.getConnection(url)) {
|
||||
Statement stmt = conn.createStatement();
|
||||
stmt.execute("drop database if exists test");
|
||||
stmt.execute("create database if not exists test precision 'us'");
|
||||
stmt.execute("use test");
|
||||
stmt.execute("create table weather(ts timestamp, f1 int)");
|
||||
stmt.execute("insert into weather values(now, 1)");
|
||||
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
rs.next();
|
||||
Timestamp ts = rs.getTimestamp("ts");
|
||||
int nanos = ts.getNanos();
|
||||
|
||||
assertEquals(0, nanos % 1000);
|
||||
|
||||
stmt.execute("drop database if exists test");
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void nanosec() {
|
||||
try (Connection conn = DriverManager.getConnection(url)) {
|
||||
Statement stmt = conn.createStatement();
|
||||
stmt.execute("drop database if exists test");
|
||||
stmt.execute("create database if not exists test precision 'ns'");
|
||||
stmt.execute("use test");
|
||||
stmt.execute("create table weather(ts timestamp, f1 int)");
|
||||
stmt.execute("insert into weather values(now, 1)");
|
||||
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
rs.next();
|
||||
|
||||
Timestamp ts = rs.getTimestamp("ts");
|
||||
|
||||
int nanos = ts.getNanos();
|
||||
assertTrue(nanos % 1000 != 0);
|
||||
|
||||
stmt.execute("drop database if exists test");
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
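The three tests above pin down how "now" is stored per database precision: in a millisecond database getNanos() is a multiple of 1 000 000, in a microsecond database a multiple of 1 000, and only a nanosecond database keeps a non-zero sub-microsecond tail. The same arithmetic in plain C over a raw epoch-in-nanoseconds value (the sample timestamp is made up for illustration):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
    /* hypothetical epoch timestamp in nanoseconds */
    int64_t ts_ns = 1629183600123456789LL;

    int64_t ns_within_second = ts_ns % 1000000000LL;  /* what Timestamp.getNanos() reports */

    printf("ms precision? %s\n", (ns_within_second % 1000000LL == 0) ? "yes" : "no");
    printf("us precision? %s\n", (ns_within_second % 1000LL    == 0) ? "yes" : "no");
    printf("ns tail     : %" PRId64 " ns\n", ns_within_second % 1000LL);
    return 0;
}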
|
@ -0,0 +1,69 @@
|
|||
package com.taosdata.jdbc.rs;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.*;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
public class DatabaseSpecifiedTest {
|
||||
|
||||
private static String host = "127.0.0.1";
|
||||
private static String dbname = "test_db_spec";
|
||||
|
||||
private Connection connection;
|
||||
private long ts;
|
||||
|
||||
@Test
|
||||
public void test() throws SQLException {
|
||||
// when
|
||||
connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/" + dbname + "?user=root&password=taosdata");
|
||||
try (Statement stmt = connection.createStatement();) {
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
|
||||
//then
|
||||
assertNotNull(rs);
|
||||
rs.next();
|
||||
long now = rs.getTimestamp("ts").getTime();
|
||||
assertEquals(ts, now);
|
||||
int f1 = rs.getInt(2);
|
||||
assertEquals(1, f1);
|
||||
String loc = rs.getString("loc");
|
||||
assertEquals("beijing", loc);
|
||||
}
|
||||
connection.close();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
ts = System.currentTimeMillis();
|
||||
try {
|
||||
Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata");
|
||||
Statement stmt = connection.createStatement();
|
||||
|
||||
stmt.execute("drop database if exists " + dbname);
|
||||
stmt.execute("create database if not exists " + dbname);
|
||||
stmt.execute("use " + dbname);
|
||||
stmt.execute("create table weather(ts timestamp, f1 int) tags(loc nchar(10))");
|
||||
stmt.execute("insert into t1 using weather tags('beijing') values( " + ts + ", 1)");
|
||||
|
||||
stmt.close();
|
||||
connection.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() {
|
||||
try {
|
||||
if (connection != null)
|
||||
connection.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -9,6 +9,8 @@ import org.junit.Test;
|
|||
import java.sql.*;
|
||||
import java.util.Properties;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class RestfulConnectionTest {
|
||||
|
||||
private static final String host = "127.0.0.1";
|
||||
|
@ -26,7 +28,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = stmt.executeQuery("select server_status()");
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
@ -38,7 +40,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = pstmt.executeQuery();
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
|
@ -49,7 +51,7 @@ public class RestfulConnectionTest {
|
|||
@Test
|
||||
public void nativeSQL() throws SQLException {
|
||||
String nativeSQL = conn.nativeSQL("select * from log.log");
|
||||
Assert.assertEquals("select * from log.log", nativeSQL);
|
||||
assertEquals("select * from log.log", nativeSQL);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -87,7 +89,7 @@ public class RestfulConnectionTest {
|
|||
public void getMetaData() throws SQLException {
|
||||
DatabaseMetaData meta = conn.getMetaData();
|
||||
Assert.assertNotNull(meta);
|
||||
Assert.assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
|
||||
assertEquals("com.taosdata.jdbc.rs.RestfulDriver", meta.getDriverName());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -103,25 +105,25 @@ public class RestfulConnectionTest {
|
|||
@Test
|
||||
public void setCatalog() throws SQLException {
|
||||
conn.setCatalog("test");
|
||||
Assert.assertEquals("test", conn.getCatalog());
|
||||
assertEquals("test", conn.getCatalog());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getCatalog() throws SQLException {
|
||||
conn.setCatalog("log");
|
||||
Assert.assertEquals("log", conn.getCatalog());
|
||||
assertEquals("log", conn.getCatalog());
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
public void setTransactionIsolation() throws SQLException {
|
||||
conn.setTransactionIsolation(Connection.TRANSACTION_NONE);
|
||||
Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
|
||||
assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
|
||||
conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getTransactionIsolation() throws SQLException {
|
||||
Assert.assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
|
||||
assertEquals(Connection.TRANSACTION_NONE, conn.getTransactionIsolation());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -140,7 +142,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = stmt.executeQuery("select server_status()");
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
|
||||
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
|
||||
}
|
||||
|
@ -152,7 +154,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = pstmt.executeQuery();
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
|
||||
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
|
||||
}
|
||||
|
@ -175,13 +177,13 @@ public class RestfulConnectionTest {
|
|||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
public void setHoldability() throws SQLException {
|
||||
conn.setHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
|
||||
Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
|
||||
assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
|
||||
conn.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getHoldability() throws SQLException {
|
||||
Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
|
||||
assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, conn.getHoldability());
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
|
@ -210,7 +212,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = stmt.executeQuery("select server_status()");
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
|
||||
conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
|
||||
}
|
||||
|
@ -222,7 +224,7 @@ public class RestfulConnectionTest {
|
|||
ResultSet rs = pstmt.executeQuery();
|
||||
rs.next();
|
||||
int status = rs.getInt("server_status()");
|
||||
Assert.assertEquals(1, status);
|
||||
assertEquals(1, status);
|
||||
|
||||
conn.prepareStatement("select server_status", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
|
||||
}
|
||||
|
@ -299,11 +301,11 @@ public class RestfulConnectionTest {
|
|||
|
||||
Properties info = conn.getClientInfo();
|
||||
String charset = info.getProperty(TSDBDriver.PROPERTY_KEY_CHARSET);
|
||||
Assert.assertEquals("UTF-8", charset);
|
||||
assertEquals("UTF-8", charset);
|
||||
String locale = info.getProperty(TSDBDriver.PROPERTY_KEY_LOCALE);
|
||||
Assert.assertEquals("en_US.UTF-8", locale);
|
||||
assertEquals("en_US.UTF-8", locale);
|
||||
String timezone = info.getProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
|
||||
Assert.assertEquals("UTC-8", timezone);
|
||||
assertEquals("UTC-8", timezone);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -313,11 +315,11 @@ public class RestfulConnectionTest {
|
|||
conn.setClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
|
||||
String charset = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_CHARSET);
|
||||
Assert.assertEquals("UTF-8", charset);
|
||||
assertEquals("UTF-8", charset);
|
||||
String locale = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_LOCALE);
|
||||
Assert.assertEquals("en_US.UTF-8", locale);
|
||||
assertEquals("en_US.UTF-8", locale);
|
||||
String timezone = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIME_ZONE);
|
||||
Assert.assertEquals("UTC-8", timezone);
|
||||
assertEquals("UTC-8", timezone);
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
|
@ -345,14 +347,15 @@ public class RestfulConnectionTest {
|
|||
conn.abort(null);
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
@Test
|
||||
public void setNetworkTimeout() throws SQLException {
|
||||
conn.setNetworkTimeout(null, 1000);
|
||||
}
|
||||
|
||||
@Test(expected = SQLFeatureNotSupportedException.class)
|
||||
@Test
|
||||
public void getNetworkTimeout() throws SQLException {
|
||||
conn.getNetworkTimeout();
|
||||
int timeout = conn.getNetworkTimeout();
|
||||
assertEquals(0, timeout);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -2,7 +2,7 @@ import taos
|
|||
|
||||
conn = taos.connect(host='127.0.0.1',
|
||||
user='root',
|
||||
passworkd='taodata',
|
||||
password='taosdata',
|
||||
database='log')
|
||||
cursor = conn.cursor()
|
||||
|
||||
|
|
|
@ -49,7 +49,7 @@ def _load_taos():
|
|||
try:
|
||||
return load_func[platform.system()]()
|
||||
except:
|
||||
sys.exit("unsupported platform to TDengine connector")
|
||||
raise InterfaceError('unsupported platform or failed to load taos client library')
|
||||
|
||||
|
||||
_libtaos = _load_taos()
|
||||
|
|
|
@ -103,6 +103,9 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty")
|
||||
#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line")
|
||||
#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached")
|
||||
#define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D) //"duplicated column names")
|
||||
#define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E) //"Invalid tag length")
|
||||
#define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F) //"Invalid column length")
|
||||
|
||||
// mnode
|
||||
#define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
|
||||
|
@ -185,6 +188,9 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func")
|
||||
#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize")
|
||||
|
||||
#define TSDB_CODE_MND_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0376) //"invalid tag length")
|
||||
#define TSDB_CODE_MND_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x0377) //"invalid column length")
|
||||
|
||||
#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available")
|
||||
#define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists")
|
||||
#define TSDB_CODE_MND_INVALID_DB_OPTION TAOS_DEF_ERROR_CODE(0, 0x0382) //"Invalid database options")
|
||||
|
|
|
@ -92,124 +92,125 @@
|
|||
#define TK_ACCOUNT 74
|
||||
#define TK_USE 75
|
||||
#define TK_DESCRIBE 76
|
||||
#define TK_ALTER 77
|
||||
#define TK_PASS 78
|
||||
#define TK_PRIVILEGE 79
|
||||
#define TK_LOCAL 80
|
||||
#define TK_COMPACT 81
|
||||
#define TK_LP 82
|
||||
#define TK_RP 83
|
||||
#define TK_IF 84
|
||||
#define TK_EXISTS 85
|
||||
#define TK_AS 86
|
||||
#define TK_OUTPUTTYPE 87
|
||||
#define TK_AGGREGATE 88
|
||||
#define TK_BUFSIZE 89
|
||||
#define TK_PPS 90
|
||||
#define TK_TSERIES 91
|
||||
#define TK_DBS 92
|
||||
#define TK_STORAGE 93
|
||||
#define TK_QTIME 94
|
||||
#define TK_CONNS 95
|
||||
#define TK_STATE 96
|
||||
#define TK_COMMA 97
|
||||
#define TK_KEEP 98
|
||||
#define TK_CACHE 99
|
||||
#define TK_REPLICA 100
|
||||
#define TK_QUORUM 101
|
||||
#define TK_DAYS 102
|
||||
#define TK_MINROWS 103
|
||||
#define TK_MAXROWS 104
|
||||
#define TK_BLOCKS 105
|
||||
#define TK_CTIME 106
|
||||
#define TK_WAL 107
|
||||
#define TK_FSYNC 108
|
||||
#define TK_COMP 109
|
||||
#define TK_PRECISION 110
|
||||
#define TK_UPDATE 111
|
||||
#define TK_CACHELAST 112
|
||||
#define TK_PARTITIONS 113
|
||||
#define TK_UNSIGNED 114
|
||||
#define TK_TAGS 115
|
||||
#define TK_USING 116
|
||||
#define TK_NULL 117
|
||||
#define TK_NOW 118
|
||||
#define TK_SELECT 119
|
||||
#define TK_UNION 120
|
||||
#define TK_ALL 121
|
||||
#define TK_DISTINCT 122
|
||||
#define TK_FROM 123
|
||||
#define TK_VARIABLE 124
|
||||
#define TK_INTERVAL 125
|
||||
#define TK_SESSION 126
|
||||
#define TK_STATE_WINDOW 127
|
||||
#define TK_FILL 128
|
||||
#define TK_SLIDING 129
|
||||
#define TK_ORDER 130
|
||||
#define TK_BY 131
|
||||
#define TK_ASC 132
|
||||
#define TK_DESC 133
|
||||
#define TK_GROUP 134
|
||||
#define TK_HAVING 135
|
||||
#define TK_LIMIT 136
|
||||
#define TK_OFFSET 137
|
||||
#define TK_SLIMIT 138
|
||||
#define TK_SOFFSET 139
|
||||
#define TK_WHERE 140
|
||||
#define TK_RESET 141
|
||||
#define TK_QUERY 142
|
||||
#define TK_SYNCDB 143
|
||||
#define TK_ADD 144
|
||||
#define TK_COLUMN 145
|
||||
#define TK_MODIFY 146
|
||||
#define TK_TAG 147
|
||||
#define TK_CHANGE 148
|
||||
#define TK_SET 149
|
||||
#define TK_KILL 150
|
||||
#define TK_CONNECTION 151
|
||||
#define TK_STREAM 152
|
||||
#define TK_COLON 153
|
||||
#define TK_ABORT 154
|
||||
#define TK_AFTER 155
|
||||
#define TK_ATTACH 156
|
||||
#define TK_BEFORE 157
|
||||
#define TK_BEGIN 158
|
||||
#define TK_CASCADE 159
|
||||
#define TK_CLUSTER 160
|
||||
#define TK_CONFLICT 161
|
||||
#define TK_COPY 162
|
||||
#define TK_DEFERRED 163
|
||||
#define TK_DELIMITERS 164
|
||||
#define TK_DETACH 165
|
||||
#define TK_EACH 166
|
||||
#define TK_END 167
|
||||
#define TK_EXPLAIN 168
|
||||
#define TK_FAIL 169
|
||||
#define TK_FOR 170
|
||||
#define TK_IGNORE 171
|
||||
#define TK_IMMEDIATE 172
|
||||
#define TK_INITIALLY 173
|
||||
#define TK_INSTEAD 174
|
||||
#define TK_KEY 175
|
||||
#define TK_OF 176
|
||||
#define TK_RAISE 177
|
||||
#define TK_REPLACE 178
|
||||
#define TK_RESTRICT 179
|
||||
#define TK_ROW 180
|
||||
#define TK_STATEMENT 181
|
||||
#define TK_TRIGGER 182
|
||||
#define TK_VIEW 183
|
||||
#define TK_IPTOKEN 184
|
||||
#define TK_SEMI 185
|
||||
#define TK_NONE 186
|
||||
#define TK_PREV 187
|
||||
#define TK_LINEAR 188
|
||||
#define TK_IMPORT 189
|
||||
#define TK_TBNAME 190
|
||||
#define TK_JOIN 191
|
||||
#define TK_INSERT 192
|
||||
#define TK_INTO 193
|
||||
#define TK_VALUES 194
|
||||
#define TK_DESC 77
|
||||
#define TK_ALTER 78
|
||||
#define TK_PASS 79
|
||||
#define TK_PRIVILEGE 80
|
||||
#define TK_LOCAL 81
|
||||
#define TK_COMPACT 82
|
||||
#define TK_LP 83
|
||||
#define TK_RP 84
|
||||
#define TK_IF 85
|
||||
#define TK_EXISTS 86
|
||||
#define TK_AS 87
|
||||
#define TK_OUTPUTTYPE 88
|
||||
#define TK_AGGREGATE 89
|
||||
#define TK_BUFSIZE 90
|
||||
#define TK_PPS 91
|
||||
#define TK_TSERIES 92
|
||||
#define TK_DBS 93
|
||||
#define TK_STORAGE 94
|
||||
#define TK_QTIME 95
|
||||
#define TK_CONNS 96
|
||||
#define TK_STATE 97
|
||||
#define TK_COMMA 98
|
||||
#define TK_KEEP 99
|
||||
#define TK_CACHE 100
|
||||
#define TK_REPLICA 101
|
||||
#define TK_QUORUM 102
|
||||
#define TK_DAYS 103
|
||||
#define TK_MINROWS 104
|
||||
#define TK_MAXROWS 105
|
||||
#define TK_BLOCKS 106
|
||||
#define TK_CTIME 107
|
||||
#define TK_WAL 108
|
||||
#define TK_FSYNC 109
|
||||
#define TK_COMP 110
|
||||
#define TK_PRECISION 111
|
||||
#define TK_UPDATE 112
|
||||
#define TK_CACHELAST 113
|
||||
#define TK_PARTITIONS 114
|
||||
#define TK_UNSIGNED 115
|
||||
#define TK_TAGS 116
|
||||
#define TK_USING 117
|
||||
#define TK_NULL 118
|
||||
#define TK_NOW 119
|
||||
#define TK_SELECT 120
|
||||
#define TK_UNION 121
|
||||
#define TK_ALL 122
|
||||
#define TK_DISTINCT 123
|
||||
#define TK_FROM 124
|
||||
#define TK_VARIABLE 125
|
||||
#define TK_INTERVAL 126
|
||||
#define TK_EVERY 127
|
||||
#define TK_SESSION 128
|
||||
#define TK_STATE_WINDOW 129
|
||||
#define TK_FILL 130
|
||||
#define TK_SLIDING 131
|
||||
#define TK_ORDER 132
|
||||
#define TK_BY 133
|
||||
#define TK_ASC 134
|
||||
#define TK_GROUP 135
|
||||
#define TK_HAVING 136
|
||||
#define TK_LIMIT 137
|
||||
#define TK_OFFSET 138
|
||||
#define TK_SLIMIT 139
|
||||
#define TK_SOFFSET 140
|
||||
#define TK_WHERE 141
|
||||
#define TK_RESET 142
|
||||
#define TK_QUERY 143
|
||||
#define TK_SYNCDB 144
|
||||
#define TK_ADD 145
|
||||
#define TK_COLUMN 146
|
||||
#define TK_MODIFY 147
|
||||
#define TK_TAG 148
|
||||
#define TK_CHANGE 149
|
||||
#define TK_SET 150
|
||||
#define TK_KILL 151
|
||||
#define TK_CONNECTION 152
|
||||
#define TK_STREAM 153
|
||||
#define TK_COLON 154
|
||||
#define TK_ABORT 155
|
||||
#define TK_AFTER 156
|
||||
#define TK_ATTACH 157
|
||||
#define TK_BEFORE 158
|
||||
#define TK_BEGIN 159
|
||||
#define TK_CASCADE 160
|
||||
#define TK_CLUSTER 161
|
||||
#define TK_CONFLICT 162
|
||||
#define TK_COPY 163
|
||||
#define TK_DEFERRED 164
|
||||
#define TK_DELIMITERS 165
|
||||
#define TK_DETACH 166
|
||||
#define TK_EACH 167
|
||||
#define TK_END 168
|
||||
#define TK_EXPLAIN 169
|
||||
#define TK_FAIL 170
|
||||
#define TK_FOR 171
|
||||
#define TK_IGNORE 172
|
||||
#define TK_IMMEDIATE 173
|
||||
#define TK_INITIALLY 174
|
||||
#define TK_INSTEAD 175
|
||||
#define TK_KEY 176
|
||||
#define TK_OF 177
|
||||
#define TK_RAISE 178
|
||||
#define TK_REPLACE 179
|
||||
#define TK_RESTRICT 180
|
||||
#define TK_ROW 181
|
||||
#define TK_STATEMENT 182
|
||||
#define TK_TRIGGER 183
|
||||
#define TK_VIEW 184
|
||||
#define TK_IPTOKEN 185
|
||||
#define TK_SEMI 186
|
||||
#define TK_NONE 187
|
||||
#define TK_PREV 188
|
||||
#define TK_LINEAR 189
|
||||
#define TK_IMPORT 190
|
||||
#define TK_TBNAME 191
|
||||
#define TK_JOIN 192
|
||||
#define TK_INSERT 193
|
||||
#define TK_INTO 194
|
||||
#define TK_VALUES 195
|
||||
|
||||
|
||||
#define TK_SPACE 300
|
||||
|
|
|
@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) {
|
|||
int32_t tbIndex = tbNum++;
|
||||
if (tbMallocNum < tbNum) {
|
||||
tbMallocNum = (tbMallocNum * 2 + 1);
|
||||
tbNames = realloc(tbNames, tbMallocNum * sizeof(char *));
|
||||
if (tbNames == NULL) {
|
||||
char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *));
|
||||
if (tbNames1 == NULL) {
|
||||
fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum);
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
break;
|
||||
}
|
||||
tbNames = tbNames1;
|
||||
}
|
||||
|
||||
tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN);
|
||||
|
|
|
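The shell hunk above captures the result of realloc in a temporary pointer (tbNames1) so the original buffer is not leaked when the call fails. A minimal standalone sketch of that pattern, assuming a hypothetical grow_names helper that is not part of the TDengine tree:

#include <stdio.h>
#include <stdlib.h>

/* Grow an array of C strings to new_count slots without leaking the old
 * buffer when realloc fails; the caller keeps ownership on failure. */
static char **grow_names(char **names, size_t new_count) {
    char **tmp = realloc(names, new_count * sizeof(char *));
    return tmp;          /* NULL on failure, old 'names' still valid */
}

int main(void) {
    char **names = NULL;
    size_t cap = 0;

    for (size_t need = 1; need <= 8; need *= 2) {
        char **tmp = grow_names(names, need);
        if (tmp == NULL) {        /* allocation failed: clean up and stop */
            free(names);
            return 1;
        }
        names = tmp;              /* only adopt the new block on success */
        cap = need;
    }
    printf("capacity grown to %zu slots\n", cap);
    free(names);
    return 0;
}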
@ -254,8 +254,12 @@ int32_t shellRunCommand(TAOS* con, char* command) {
|
|||
}
|
||||
|
||||
if (c == '\\') {
|
||||
esc = true;
|
||||
continue;
|
||||
if (quote != 0 && (*command == '_' || *command == '\\')) {
|
||||
//DO nothing
|
||||
} else {
|
||||
esc = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (quote == c) {
|
||||
|
|
|
@ -108,7 +108,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
arguments->is_raw_time = true;
|
||||
break;
|
||||
case 'f':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
if ((0 == strlen(arg)) || (wordexp(arg, &full_path, 0) != 0)) {
|
||||
fprintf(stderr, "Invalid path %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
|
|
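The parse_opt change above rejects an empty -f argument before handing it to wordexp(). A small sketch of that guard using only the POSIX wordexp API (the check_path helper name is made up for illustration):

#include <stdio.h>
#include <string.h>
#include <wordexp.h>

/* Expand a user-supplied path only if it is non-empty; returns 0 on success. */
static int check_path(const char *arg, char *out, size_t cap) {
    wordexp_t full_path;

    if (arg == NULL || strlen(arg) == 0 || wordexp(arg, &full_path, 0) != 0) {
        fprintf(stderr, "Invalid path %s\n", arg ? arg : "(null)");
        return -1;
    }
    snprintf(out, cap, "%s", full_path.we_wordv[0]);  /* first expansion result */
    wordfree(&full_path);
    return 0;
}

int main(void) {
    char path[256];
    if (check_path("~/input.sql", path, sizeof(path)) == 0) {
        printf("expanded to: %s\n", path);
    }
    return 0;
}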
File diff suppressed because it is too large
|
@ -225,16 +225,15 @@ static struct argp_option options[] = {
|
|||
{"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0},
|
||||
#endif
|
||||
{"port", 'P', "PORT", 0, "Port to connect", 0},
|
||||
{"cversion", 'v', "CVERION", 0, "client version", 0},
|
||||
{"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0},
|
||||
// input/output file
|
||||
{"outpath", 'o', "OUTPATH", 0, "Output file path.", 1},
|
||||
{"inpath", 'i', "INPATH", 0, "Input file path.", 1},
|
||||
{"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1},
|
||||
#ifdef _TD_POWER_
|
||||
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
|
||||
{"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1},
|
||||
#else
|
||||
{"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
|
||||
{"config-dir", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1},
|
||||
#endif
|
||||
{"encode", 'e', "ENCODE", 0, "Input file encoding.", 1},
|
||||
// dump unit options
|
||||
|
@ -244,7 +243,7 @@ static struct argp_option options[] = {
|
|||
// dump format options
|
||||
{"schemaonly", 's', 0, 0, "Only dump schema.", 2},
|
||||
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
|
||||
{"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
|
||||
{"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
|
||||
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
|
||||
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
|
||||
#if TSDB_SUPPORT_NANOSECOND == 1
|
||||
|
@ -267,7 +266,6 @@ typedef struct arguments {
|
|||
char *user;
|
||||
char password[SHELL_MAX_PASSWORD_LEN];
|
||||
uint16_t port;
|
||||
char cversion[12];
|
||||
uint16_t mysqlFlag;
|
||||
// output file
|
||||
char outpath[MAX_FILE_NAME_LEN];
|
||||
|
@ -338,7 +336,6 @@ struct arguments g_args = {
|
|||
"taosdata",
|
||||
#endif
|
||||
0,
|
||||
"",
|
||||
0,
|
||||
// outpath and inpath
|
||||
"",
|
||||
|
@ -370,6 +367,24 @@ struct arguments g_args = {
|
|||
false // performance_print
|
||||
};
|
||||
|
||||
static void errorPrintReqArg2(char *program, char *wrong_arg)
|
||||
{
|
||||
fprintf(stderr,
|
||||
"%s: option requires a number argument '-%s'\n",
|
||||
program, wrong_arg);
|
||||
fprintf(stderr,
|
||||
"Try `taosdump --help' or `taosdump --usage' for more information.\n");
|
||||
}
|
||||
|
||||
static void errorPrintReqArg3(char *program, char *wrong_arg)
|
||||
{
|
||||
fprintf(stderr,
|
||||
"%s: option '%s' requires an argument\n",
|
||||
program, wrong_arg);
|
||||
fprintf(stderr,
|
||||
"Try `taosdump --help' or `taosdump --usage' for more information.\n");
|
||||
}
|
||||
|
||||
/* Parse a single option. */
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
/* Get the input argument from argp_parse, which we
|
||||
|
@ -390,20 +405,15 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
case 'p':
|
||||
break;
|
||||
case 'P':
|
||||
if (!isStringNumber(arg)) {
|
||||
errorPrintReqArg2("taosdump", "P");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
g_args.port = atoi(arg);
|
||||
break;
|
||||
case 'q':
|
||||
g_args.mysqlFlag = atoi(arg);
|
||||
break;
|
||||
case 'v':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
errorPrint("Invalid client vesion %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
tstrncpy(g_args.cversion, full_path.we_wordv[0], 11);
|
||||
wordfree(&full_path);
|
||||
break;
|
||||
// output file path
|
||||
case 'o':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
errorPrint("Invalid path %s\n", arg);
|
||||
|
@ -430,9 +440,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
g_args.resultFile = arg;
|
||||
break;
|
||||
case 'c':
|
||||
if (0 == strlen(arg)) {
|
||||
errorPrintReqArg3("taosdump", "-c or --config-dir");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
errorPrint("Invalid path %s\n", arg);
|
||||
return -1;
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN);
|
||||
wordfree(&full_path);
|
||||
|
@ -453,7 +467,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
case 'N':
|
||||
g_args.with_property = false;
|
||||
break;
|
||||
case 'V':
|
||||
case 'v':
|
||||
g_args.avro = true;
|
||||
break;
|
||||
case 'S':
|
||||
|
@ -660,6 +674,9 @@ static void parse_timestamp(
|
|||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
static char verType[32] = {0};
|
||||
sprintf(verType, "version: %s\n", version);
|
||||
argp_program_version = verType;
|
||||
|
||||
int ret = 0;
|
||||
/* Parse our arguments; every option seen by parse_opt will be
|
||||
|
@ -686,7 +703,6 @@ int main(int argc, char *argv[]) {
|
|||
printf("user: %s\n", g_args.user);
|
||||
printf("password: %s\n", g_args.password);
|
||||
printf("port: %u\n", g_args.port);
|
||||
printf("cversion: %s\n", g_args.cversion);
|
||||
printf("mysqlFlag: %d\n", g_args.mysqlFlag);
|
||||
printf("outpath: %s\n", g_args.outpath);
|
||||
printf("inpath: %s\n", g_args.inpath);
|
||||
|
@ -715,11 +731,6 @@ int main(int argc, char *argv[]) {
|
|||
}
|
||||
}
|
||||
printf("==============================\n");
|
||||
|
||||
if (g_args.cversion[0] != 0){
|
||||
tstrncpy(version, g_args.cversion, 11);
|
||||
}
|
||||
|
||||
if (taosCheckParam(&g_args) < 0) {
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
@ -737,7 +748,6 @@ int main(int argc, char *argv[]) {
|
|||
fprintf(g_fpOfResult, "user: %s\n", g_args.user);
|
||||
fprintf(g_fpOfResult, "password: %s\n", g_args.password);
|
||||
fprintf(g_fpOfResult, "port: %u\n", g_args.port);
|
||||
fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion);
|
||||
fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag);
|
||||
fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath);
|
||||
fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath);
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
#if defined(WINDOWS)
|
||||
int main(int argc, char *argv[]) {
|
||||
printf("welcome to use taospack tools v1.3 for windows.\n");
|
||||
|
@ -148,7 +149,10 @@ float* read_float(const char* inFile, int* pcount){
|
|||
//printf(" buff=%s float=%.50f \n ", buf, floats[fi]);
|
||||
if ( ++fi == malloc_cnt ) {
|
||||
malloc_cnt += 100000;
|
||||
floats = realloc(floats, malloc_cnt*sizeof(float));
|
||||
float* floats1 = realloc(floats, malloc_cnt*sizeof(float));
|
||||
if(floats1 == NULL)
|
||||
break;
|
||||
floats = floats1;
|
||||
}
|
||||
memset(buf, 0, sizeof(buf));
|
||||
}
|
||||
|
@ -601,7 +605,6 @@ void test_threadsafe_double(int thread_count){
|
|||
|
||||
}
|
||||
|
||||
|
||||
void unitTestFloat() {
|
||||
|
||||
float ft1 [] = {1.11, 2.22, 3.333};
|
||||
|
@ -662,7 +665,50 @@ void unitTestFloat() {
|
|||
free(ft2);
|
||||
free(buff);
|
||||
free(output);
|
||||
|
||||
}
|
||||
|
||||
void leakFloat() {
|
||||
|
||||
int cnt = sizeof(g_ft1)/sizeof(float);
|
||||
float* floats = g_ft1;
|
||||
int algorithm = 2;
|
||||
|
||||
// compress
|
||||
const char* input = (const char*)floats;
|
||||
int input_len = cnt * sizeof(float);
|
||||
int output_len = input_len + 1024;
|
||||
char* output = (char*) malloc(output_len);
|
||||
char* buff = (char*) malloc(input_len);
|
||||
int buff_len = input_len;
|
||||
|
||||
int ret_len = 0;
|
||||
ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
|
||||
|
||||
if(ret_len == 0) {
|
||||
printf(" compress float error.\n");
|
||||
free(buff);
|
||||
free(output);
|
||||
return ;
|
||||
}
|
||||
|
||||
float* ft2 = (float*)malloc(input_len);
|
||||
ret_len = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
|
||||
if(ret_len == 0) {
|
||||
printf(" decompress float error.\n");
|
||||
}
|
||||
|
||||
free(ft2);
|
||||
free(buff);
|
||||
free(output);
|
||||
}
|
||||
|
||||
|
||||
void leakTest(){
|
||||
for(int i=0; i< 90000000000000; i++){
|
||||
if(i%10000==0)
|
||||
printf(" ---------- %d ---------------- \n", i);
|
||||
leakFloat();
|
||||
}
|
||||
}
|
||||
|
||||
#define DB_CNT 500
|
||||
|
@ -689,7 +735,7 @@ extern char Compressor [];
|
|||
// ----------------- main ----------------------
|
||||
//
|
||||
int main(int argc, char *argv[]) {
|
||||
printf("welcome to use taospack tools v1.3\n");
|
||||
printf("welcome to use taospack tools v1.6\n");
|
||||
|
||||
//printf(" sizeof(int)=%d\n", (int)sizeof(int));
|
||||
//printf(" sizeof(long)=%d\n", (int)sizeof(long));
|
||||
|
@ -753,6 +799,9 @@ int main(int argc, char *argv[]) {
|
|||
if(strcmp(argv[1], "-mem") == 0) {
|
||||
memTest();
|
||||
}
|
||||
else if(strcmp(argv[1], "-leak") == 0) {
|
||||
leakTest();
|
||||
}
|
||||
}
|
||||
else{
|
||||
unitTestFloat();
|
||||
|
|
|
@ -1518,6 +1518,13 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
|
|||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + col);
|
||||
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
|
||||
|
||||
if (pAlter->schema[0].bytes <= schema->bytes) {
|
||||
mError("msg:%p, app:%p stable:%s, modify column len. column:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
|
||||
return TSDB_CODE_MND_INVALID_COLUMN_LENGTH;
|
||||
}
|
||||
|
||||
schema->bytes = pAlter->schema[0].bytes;
|
||||
pStable->sversion++;
|
||||
mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
|
||||
|
@ -1548,6 +1555,12 @@ static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) {
|
|||
// update
|
||||
SSchema *schema = (SSchema *) (pStable->schema + col + pStable->numOfColumns);
|
||||
ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
|
||||
if (pAlter->schema[0].bytes <= schema->bytes) {
|
||||
mError("msg:%p, app:%p stable:%s, modify tag len. tag:%s, len from %d to %d", pMsg, pMsg->rpcMsg.ahandle,
|
||||
pStable->info.tableId, name, schema->bytes, pAlter->schema[0].bytes);
|
||||
return TSDB_CODE_MND_INVALID_TAG_LENGTH;
|
||||
}
|
||||
|
||||
schema->bytes = pAlter->schema[0].bytes;
|
||||
pStable->tversion++;
|
||||
mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
|
||||
|
@ -2921,10 +2934,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
|
|||
(*totalMallocLen) *= 2;
|
||||
}
|
||||
|
||||
pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
|
||||
if (pMultiMeta == NULL) {
|
||||
SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen);
|
||||
if (pMultiMeta1 == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
pMultiMeta = pMultiMeta1;
|
||||
}
|
||||
|
||||
return pMultiMeta;
|
||||
|
|
|
@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) {
|
|||
|
||||
void * tptr = (void *)((char *)ptr - sizeof(size_t));
|
||||
size_t tsize = size + sizeof(size_t);
|
||||
tptr = realloc(tptr, tsize);
|
||||
if (tptr == NULL) return NULL;
|
||||
void* tptr1 = realloc(tptr, tsize);
|
||||
if (tptr1 == NULL) return NULL;
|
||||
tptr = tptr1;
|
||||
|
||||
*(size_t *)tptr = size;
|
||||
|
||||
|
|
|
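taosTRealloc keeps the payload size in a size_t header just in front of the pointer it hands out, and the fix above stops a failed realloc from clobbering the tracked pointer. A self-contained sketch of that header scheme, assuming illustrative names (sized_realloc / sized_size are not the real API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reallocate a block whose usable size is stored in a size_t header right in
 * front of the returned pointer; on failure the caller's block is untouched. */
static void *sized_realloc(void *user_ptr, size_t size) {
    void *base  = (user_ptr == NULL) ? NULL : (char *)user_ptr - sizeof(size_t);
    void *nbase = realloc(base, size + sizeof(size_t));
    if (nbase == NULL) return NULL;          /* old block still valid */
    *(size_t *)nbase = size;                 /* remember the payload size */
    return (char *)nbase + sizeof(size_t);
}

static size_t sized_size(const void *user_ptr) {
    return user_ptr ? *(const size_t *)((const char *)user_ptr - sizeof(size_t)) : 0;
}

int main(void) {
    char *buf = sized_realloc(NULL, 16);
    if (buf == NULL) return 1;
    strcpy(buf, "hello");
    char *bigger = sized_realloc(buf, 64);
    if (bigger != NULL) buf = bigger;        /* only adopt the new block on success */
    printf("%s (capacity %zu)\n", buf, sized_size(buf));
    free(buf - sizeof(size_t));              /* free the real base, not the payload */
    return 0;
}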
@ -50,14 +50,20 @@ void osInit() {
|
|||
char* taosGetCmdlineByPID(int pid) {
|
||||
static char cmdline[1024];
|
||||
sprintf(cmdline, "/proc/%d/cmdline", pid);
|
||||
FILE* f = fopen(cmdline, "r");
|
||||
if (f) {
|
||||
size_t size;
|
||||
size = fread(cmdline, sizeof(char), 1024, f);
|
||||
if (size > 0) {
|
||||
if ('\n' == cmdline[size - 1]) cmdline[size - 1] = '\0';
|
||||
}
|
||||
fclose(f);
|
||||
|
||||
int fd = open(cmdline, O_RDONLY);
|
||||
if (fd >= 0) {
|
||||
int n = read(fd, cmdline, sizeof(cmdline) - 1);
|
||||
if (n < 0) n = 0;
|
||||
|
||||
if (n > 0 && cmdline[n - 1] == '\n') --n;
|
||||
|
||||
cmdline[n] = 0;
|
||||
|
||||
close(fd);
|
||||
} else {
|
||||
cmdline[0] = 0;
|
||||
}
|
||||
|
||||
return cmdline;
|
||||
}
|
||||
|
|
|
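The osLinux hunk above swaps buffered fopen/fread for open/read and is careful to NUL-terminate the buffer and strip a trailing newline even when the read fails. A standalone sketch of the same approach (read_cmdline is a hypothetical helper, not the TDengine function):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read /proc/<pid>/cmdline into buf; buf is always NUL-terminated and a
 * trailing newline, if any, is dropped. Returns buf (possibly empty). */
static char *read_cmdline(int pid, char *buf, size_t cap) {
    char path[64];
    snprintf(path, sizeof(path), "/proc/%d/cmdline", pid);

    int fd = open(path, O_RDONLY);
    if (fd < 0) { buf[0] = '\0'; return buf; }

    ssize_t n = read(fd, buf, cap - 1);   /* leave one byte for the terminator */
    if (n < 0) n = 0;
    if (n > 0 && buf[n - 1] == '\n') --n;
    buf[n] = '\0';

    close(fd);
    return buf;
}

int main(void) {
    char cmdline[1024];
    /* /proc cmdline is NUL-separated, so this prints only argv[0] */
    printf("self: %s\n", read_cmdline(getpid(), cmdline, sizeof(cmdline)));
    return 0;
}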
@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t
|
|||
*n += MIN_CHUNK;
|
||||
|
||||
nchars_avail = (int32_t)(*n + *lineptr - read_pos);
|
||||
*lineptr = realloc(*lineptr, *n);
|
||||
if (!*lineptr) {
|
||||
char* lineptr1 = realloc(*lineptr, *n);
|
||||
if (!lineptr1) {
|
||||
errno = ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
*lineptr = lineptr1;
|
||||
|
||||
read_pos = *n - nchars_avail + *lineptr;
|
||||
assert((*lineptr + *n) == (read_pos + nchars_avail));
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
|
|||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
|
||||
INCLUDE_DIRECTORIES(inc)
|
||||
AUX_SOURCE_DIRECTORY(src SRC)
|
||||
|
||||
|
|
|
@ -150,6 +150,7 @@ typedef struct HttpContext {
|
|||
char ipstr[22];
|
||||
char user[TSDB_USER_LEN]; // parsed from auth token or login message
|
||||
char pass[HTTP_PASSWORD_LEN];
|
||||
char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
|
||||
TAOS * taos;
|
||||
void * ppContext;
|
||||
HttpSession *session;
|
||||
|
|
|
@ -22,12 +22,12 @@
|
|||
#include "httpResp.h"
|
||||
#include "httpSql.h"
|
||||
|
||||
#define REST_ROOT_URL_POS 0
|
||||
#define REST_ACTION_URL_POS 1
|
||||
#define REST_USER_URL_POS 2
|
||||
#define REST_PASS_URL_POS 3
|
||||
#define REST_ROOT_URL_POS 0
|
||||
#define REST_ACTION_URL_POS 1
|
||||
#define REST_USER_USEDB_URL_POS 2
|
||||
#define REST_PASS_URL_POS 3
|
||||
|
||||
void restInitHandle(HttpServer* pServer);
|
||||
bool restProcessRequest(struct HttpContext* pContext);
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "httpLog.h"
|
||||
#include "httpRestHandle.h"
|
||||
#include "httpRestJson.h"
|
||||
#include "tglobal.h"
|
||||
|
||||
static HttpDecodeMethod restDecodeMethod = {"rest", restProcessRequest};
|
||||
static HttpDecodeMethod restDecodeMethod2 = {"restful", restProcessRequest};
|
||||
|
@ -62,11 +63,11 @@ void restInitHandle(HttpServer* pServer) {
|
|||
|
||||
bool restGetUserFromUrl(HttpContext* pContext) {
|
||||
HttpParser* pParser = pContext->parser;
|
||||
if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) {
|
||||
if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN);
|
||||
tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -107,6 +108,24 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) {
|
|||
HttpSqlCmd* cmd = &(pContext->singleCmd);
|
||||
cmd->nativSql = sql;
|
||||
|
||||
/* find if there is db_name in url */
|
||||
pContext->db[0] = '\0';
|
||||
|
||||
HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS];
|
||||
if (tsHttpDbNameMandatory) {
|
||||
if (path->pos == 0) {
|
||||
httpError("context:%p, fd:%d, user:%s, database name is mandatory", pContext, pContext->fd, pContext->user);
|
||||
httpSendErrorResp(pContext, TSDB_CODE_HTTP_INVALID_URL);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') &&
|
||||
(sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' '))
|
||||
{
|
||||
snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str);
|
||||
}
|
||||
|
||||
pContext->reqType = HTTP_REQTYPE_SINGLE_SQL;
|
||||
if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) {
|
||||
pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod;
|
||||
|
|
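restProcessSqlRequest above takes the database name from the URL path unless the statement itself is a USE command; the check is a hand-rolled, case-insensitive test of the first four characters. A small sketch of that test (is_use_statement is an illustrative name, not the handler's code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True when the SQL text starts with the keyword "use " in any case,
 * i.e. the statement picks its own database. */
static bool is_use_statement(const char *sql) {
    return strlen(sql) > 4 &&
           (sql[0] == 'u' || sql[0] == 'U') &&
           (sql[1] == 's' || sql[1] == 'S') &&
           (sql[2] == 'e' || sql[2] == 'E') &&
           sql[3] == ' ';
}

int main(void) {
    printf("%d\n", is_use_statement("USE db1"));               /* 1 */
    printf("%d\n", is_use_statement("select * from weather")); /* 0 */
    return 0;
}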
|
@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) {
|
|||
&(pContext->taos));
|
||||
httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user,
|
||||
pContext->taos);
|
||||
|
||||
if (pContext->taos != NULL) {
|
||||
STscObj *pObj = pContext->taos;
|
||||
pObj->from = TAOS_REQ_FROM_HTTP;
|
||||
}
|
||||
} else {
|
||||
httpExecCmd(pContext);
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
#define monTrace(...) { if (monDebugFlag & DEBUG_TRACE) { taosPrintLog("MON ", monDebugFlag, __VA_ARGS__); }}
|
||||
|
||||
#define SQL_LENGTH 1030
|
||||
#define LOG_LEN_STR 100
|
||||
#define LOG_LEN_STR 512
|
||||
#define IP_LEN_STR TSDB_EP_LEN
|
||||
#define CHECK_INTERVAL 1000
|
||||
|
||||
|
|
|
@ -221,6 +221,7 @@ typedef struct SQueryAttr {
|
|||
bool distinct; // distinct query or not
|
||||
bool stateWindow; // window State on sub/normal table
|
||||
bool createFilterOperator; // if filter operator is needed
|
||||
bool multigroupResult; // multigroup result can exist in one SSDataBlock
|
||||
int32_t interBufSize; // intermediate buffer sizse
|
||||
|
||||
int32_t havingNum; // having expr number
|
||||
|
@ -467,16 +468,23 @@ typedef struct SLimitOperatorInfo {
|
|||
} SLimitOperatorInfo;
|
||||
|
||||
typedef struct SSLimitOperatorInfo {
|
||||
int64_t groupTotal;
|
||||
int64_t currentGroupOffset;
|
||||
int64_t groupTotal;
|
||||
int64_t currentGroupOffset;
|
||||
|
||||
int64_t rowsTotal;
|
||||
int64_t currentOffset;
|
||||
SLimitVal limit;
|
||||
SLimitVal slimit;
|
||||
int64_t rowsTotal;
|
||||
int64_t currentOffset;
|
||||
SLimitVal limit;
|
||||
SLimitVal slimit;
|
||||
|
||||
char **prevRow;
|
||||
SArray *orderColumnList;
|
||||
char **prevRow;
|
||||
SArray *orderColumnList;
|
||||
bool hasPrev;
|
||||
bool ignoreCurrentGroup;
|
||||
bool multigroupResult;
|
||||
SSDataBlock *pRes; // result buffer
|
||||
SSDataBlock *pPrevBlock;
|
||||
int64_t capacity;
|
||||
int64_t threshold;
|
||||
} SSLimitOperatorInfo;
|
||||
|
||||
typedef struct SFilterOperatorInfo {
|
||||
|
@ -488,8 +496,9 @@ typedef struct SFillOperatorInfo {
|
|||
SFillInfo *pFillInfo;
|
||||
SSDataBlock *pRes;
|
||||
int64_t totalInputRows;
|
||||
|
||||
void **p;
|
||||
SSDataBlock *existNewGroupBlock;
|
||||
bool multigroupResult;
|
||||
} SFillOperatorInfo;
|
||||
|
||||
typedef struct SGroupbyOperatorInfo {
|
||||
|
@ -551,9 +560,9 @@ typedef struct SMultiwayMergeInfo {
|
|||
bool hasDataBlockForNewGroup;
|
||||
SSDataBlock *pExistBlock;
|
||||
|
||||
bool hasPrev;
|
||||
bool groupMix;
|
||||
SArray *udfInfo;
|
||||
bool hasPrev;
|
||||
bool multiGroupResults;
|
||||
} SMultiwayMergeInfo;
|
||||
|
||||
// todo support the disk-based sort
|
||||
|
@ -575,7 +584,7 @@ SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
|
|||
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult);
|
||||
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
|
@ -584,10 +593,10 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
|
|||
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
|
||||
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
|
||||
int32_t numOfRows, void* merger, bool groupMix);
|
||||
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
|
||||
int32_t numOfRows, void* merger);
|
||||
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp);
|
||||
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
|
||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
|
||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger, bool multigroupResult);
|
||||
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
|
||||
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
|
||||
|
||||
|
@ -604,6 +613,7 @@ bool doFilterDataBlock(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilter
|
|||
void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p);
|
||||
|
||||
SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows);
|
||||
|
||||
void* destroyOutputBuf(SSDataBlock* pBlock);
|
||||
void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols);
|
||||
|
||||
|
@ -612,6 +622,7 @@ int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int3
|
|||
void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset);
|
||||
void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows);
|
||||
void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity);
|
||||
void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput);
|
||||
|
||||
void freeParam(SQueryParam *param);
|
||||
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);
|
||||
|
|
|
@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
|
|||
|
||||
void tOrderDescDestroy(tOrderDescriptor *pDesc);
|
||||
|
||||
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn);
|
||||
|
||||
void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows,
|
||||
int32_t numOfRowsToWrite, int32_t srcCapacity);
|
||||
|
||||
|
|
|
@ -80,6 +80,7 @@ typedef struct tVariantListItem {
|
|||
} tVariantListItem;
|
||||
|
||||
typedef struct SIntervalVal {
|
||||
int32_t token;
|
||||
SStrToken interval;
|
||||
SStrToken offset;
|
||||
} SIntervalVal;
|
||||
|
|
|
@ -165,6 +165,7 @@ typedef struct SQueryInfo {
|
|||
bool orderProjectQuery;
|
||||
bool stateWindow;
|
||||
bool globalMerge;
|
||||
bool multigroupResult;
|
||||
} SQueryInfo;
|
||||
|
||||
/**
|
||||
|
|
|
@ -24,10 +24,10 @@ extern "C" {
|
|||
|
||||
extern uint32_t qDebugFlag;
|
||||
|
||||
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0)
|
||||
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0)
|
||||
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0)
|
||||
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0)
|
||||
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0)
|
||||
|
|
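The qlog.h change above passes qDebugFlag instead of the hard-coded 255 into taosPrintLog, so the module's own flag actually gates what gets emitted. A generic sketch of such a flag-gated logging macro (LOG_INFO, LOG_DEBUG and log_print are illustrative, not the TDengine API):

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

enum { LOG_LVL_ERROR = 1u << 0, LOG_LVL_INFO = 1u << 1, LOG_LVL_DEBUG = 1u << 2 };

static uint32_t log_flag = LOG_LVL_ERROR | LOG_LVL_INFO;   /* module-level switch */

static void log_print(const char *tag, const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    fprintf(stderr, "%s ", tag);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

/* Each macro checks the module flag before formatting anything. */
#define LOG_INFO(...)  do { if (log_flag & LOG_LVL_INFO)  { log_print("QRY ",       __VA_ARGS__); } } while (0)
#define LOG_DEBUG(...) do { if (log_flag & LOG_LVL_DEBUG) { log_print("QRY DEBUG ", __VA_ARGS__); } } while (0)

int main(void) {
    LOG_INFO("query started, tables=%d\n", 3);   /* printed */
    LOG_DEBUG("row buffer at %p\n", (void *)0);  /* suppressed: DEBUG bit not set */
    return 0;
}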
|
@ -162,7 +162,10 @@ cmd ::= DESCRIBE ids(X) cpxName(Y). {
|
|||
X.n += Y.n;
|
||||
setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X);
|
||||
}
|
||||
|
||||
cmd ::= DESC ids(X) cpxName(Y). {
|
||||
X.n += Y.n;
|
||||
setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &X);
|
||||
}
|
||||
/////////////////////////////////THE ALTER STATEMENT////////////////////////////////////////
|
||||
cmd ::= ALTER USER ids(X) PASS ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &X, &Y, NULL); }
|
||||
cmd ::= ALTER USER ids(X) PRIVILEGE ids(Y). { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &X, NULL, &Y);}
|
||||
|
@ -479,7 +482,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
|
|||
//////////////////////// The SELECT statement /////////////////////////////////
|
||||
%type select {SSqlNode*}
|
||||
%destructor select {destroySqlNode($$);}
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F)groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
|
||||
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_option(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F)groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
|
||||
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
|
||||
}
|
||||
|
||||
|
@ -493,7 +496,7 @@ union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); }
|
|||
cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
|
||||
|
||||
// Support for the SQL exprssion without from & where subclauses, e.g.,
|
||||
// select current_database()
|
||||
// select database()
|
||||
// select server_version()
|
||||
// select client_version()
|
||||
// select server_state()
|
||||
|
@ -569,10 +572,14 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
|
|||
%type tmvar {SStrToken}
|
||||
tmvar(A) ::= VARIABLE(X). {A = X;}
|
||||
|
||||
%type interval_opt {SIntervalVal}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0;}
|
||||
interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X;}
|
||||
interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
|
||||
%type interval_option {SIntervalVal}
|
||||
interval_option(N) ::= intervalKey(A) LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.token = A;}
|
||||
interval_option(N) ::= intervalKey(A) LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X; N.token = A;}
|
||||
interval_option(N) ::= . {memset(&N, 0, sizeof(N));}
|
||||
|
||||
%type intervalKey {int32_t}
|
||||
intervalKey(A) ::= INTERVAL. {A = TK_INTERVAL;}
|
||||
intervalKey(A) ::= EVERY. {A = TK_EVERY; }
|
||||
|
||||
%type session_option {SSessionWindowVal}
|
||||
session_option(X) ::= . {X.col.n = 0; X.gap.n = 0;}
|
||||
|
@ -581,6 +588,7 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
|
|||
X.col = V;
|
||||
X.gap = Y;
|
||||
}
|
||||
|
||||
%type windowstate_option {SWindowStateVal}
|
||||
windowstate_option(X) ::= . { X.col.n = 0; X.col.z = NULL;}
|
||||
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. { X.col = V; }
|
||||
|
|
|
@ -1214,6 +1214,31 @@ static int32_t minmax_merge_impl(SQLFunctionCtx *pCtx, int32_t bytes, char *outp
|
|||
DUPATE_DATA_WITHOUT_TS(pCtx, *(int64_t *)output, v, notNullElems, isMin);
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UTINYINT: {
|
||||
uint8_t v = GET_UINT8_VAL(input);
|
||||
DUPATE_DATA_WITHOUT_TS(pCtx, *(uint8_t *)output, v, notNullElems, isMin);
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_USMALLINT: {
|
||||
uint16_t v = GET_UINT16_VAL(input);
|
||||
DUPATE_DATA_WITHOUT_TS(pCtx, *(uint16_t *)output, v, notNullElems, isMin);
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UINT: {
|
||||
uint32_t v = GET_UINT32_VAL(input);
|
||||
DUPATE_DATA_WITHOUT_TS(pCtx, *(uint32_t *)output, v, notNullElems, isMin);
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UBIGINT: {
|
||||
uint64_t v = GET_UINT64_VAL(input);
|
||||
DUPATE_DATA_WITHOUT_TS(pCtx, *(uint64_t *)output, v, notNullElems, isMin);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -3670,6 +3695,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
return;
|
||||
}
|
||||
|
||||
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
|
||||
|
||||
if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
*(TSKEY *)pCtx->pOutput = pCtx->startTs;
|
||||
} else if (type == TSDB_FILL_NULL) {
|
||||
|
@ -3677,7 +3704,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
} else if (type == TSDB_FILL_SET_VALUE) {
|
||||
tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true);
|
||||
} else {
|
||||
if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) {
|
||||
if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) {
|
||||
if (type == TSDB_FILL_PREV) {
|
||||
if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val);
|
||||
|
@ -3716,13 +3743,14 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
TSKEY skey = GET_TS_DATA(pCtx, 0);
|
||||
|
||||
if (type == TSDB_FILL_PREV) {
|
||||
if (skey > pCtx->startTs) {
|
||||
if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pCtx->size > 1) {
|
||||
TSKEY ekey = GET_TS_DATA(pCtx, 1);
|
||||
if (ekey > skey && ekey <= pCtx->startTs) {
|
||||
if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) ||
|
||||
((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){
|
||||
skey = ekey;
|
||||
}
|
||||
}
|
||||
|
@ -3731,10 +3759,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
TSKEY ekey = skey;
|
||||
char* val = NULL;
|
||||
|
||||
if (ekey < pCtx->startTs) {
|
||||
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
|
||||
if (pCtx->size > 1) {
|
||||
ekey = GET_TS_DATA(pCtx, 1);
|
||||
if (ekey < pCtx->startTs) {
|
||||
if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -3755,12 +3783,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
TSKEY ekey = GET_TS_DATA(pCtx, 1);
|
||||
|
||||
// no data generated yet
|
||||
if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
|
||||
if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs))
|
||||
|| ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
|
||||
|
||||
char *start = GET_INPUT_DATA(pCtx, 0);
|
||||
char *end = GET_INPUT_DATA(pCtx, 1);
|
||||
|
||||
|
@ -3788,11 +3815,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
|
|||
static void interp_function(SQLFunctionCtx *pCtx) {
|
||||
// at this point, the value is existed, return directly
|
||||
if (pCtx->size > 0) {
|
||||
// impose the timestamp check
|
||||
TSKEY key = GET_TS_DATA(pCtx, 0);
|
||||
bool ascQuery = (pCtx->order == TSDB_ORDER_ASC);
|
||||
TSKEY key;
|
||||
char *pData;
|
||||
int32_t typedData = 0;
|
||||
|
||||
if (ascQuery) {
|
||||
key = GET_TS_DATA(pCtx, 0);
|
||||
pData = GET_INPUT_DATA(pCtx, 0);
|
||||
} else {
|
||||
key = pCtx->start.key;
|
||||
if (key == INT64_MIN) {
|
||||
key = GET_TS_DATA(pCtx, 0);
|
||||
pData = GET_INPUT_DATA(pCtx, 0);
|
||||
} else {
|
||||
if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) {
|
||||
pData = pCtx->start.ptr;
|
||||
} else {
|
||||
typedData = 1;
|
||||
pData = (char *)&pCtx->start.val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) {
|
||||
if (key == pCtx->startTs) {
|
||||
char *pData = GET_INPUT_DATA(pCtx, 0);
|
||||
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
|
||||
if (typedData) {
|
||||
SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData);
|
||||
} else {
|
||||
assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType);
|
||||
}
|
||||
|
||||
SET_VAL(pCtx, 1, 1);
|
||||
} else {
|
||||
interp_function_impl(pCtx);
|
||||
|
@ -4061,7 +4114,7 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
|
|||
} else {
|
||||
pDist->maxRows = pSrc->maxRows;
|
||||
pDist->minRows = pSrc->minRows;
|
||||
|
||||
|
||||
int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS;
|
||||
if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) {
|
||||
++maxSteps;
|
||||
|
@ -4195,7 +4248,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
|
|||
taosArrayDestroy(pDist->dataBlockInfos);
|
||||
pDist->dataBlockInfos = NULL;
|
||||
}
|
||||
|
||||
|
||||
// cannot set the numOfIteratedElems again since it is set during previous iteration
|
||||
pResInfo->numOfRes = 1;
|
||||
pResInfo->hasResult = DATA_SET_FLAG;
|
||||
|
|
|
@ -38,15 +38,12 @@
|
|||
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
|
||||
|
||||
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
|
||||
|
||||
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
|
||||
|
||||
#define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0}
|
||||
|
||||
#define MULTI_KEY_DELIM "-"
|
||||
|
||||
#define HASH_CAPACITY_LIMIT 10000000
|
||||
|
||||
#define TIME_WINDOW_COPY(_dst, _src) do {\
|
||||
(_dst).skey = (_src).skey;\
|
||||
(_dst).ekey = (_src).ekey;\
|
||||
|
@ -233,6 +230,12 @@ static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput);
|
|||
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
|
||||
static void destroyOperatorInfo(SOperatorInfo* pOperator);
|
||||
|
||||
static void doSetOperatorCompleted(SOperatorInfo* pOperator) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
if (pOperator->pRuntimeEnv != NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock);
|
||||
|
||||
|
@ -1327,6 +1330,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo,
|
|||
|
||||
pCtx[k].end.key = curTs;
|
||||
pCtx[k].end.val = v2;
|
||||
|
||||
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (prevRowIndex == -1) {
|
||||
pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index];
|
||||
} else {
|
||||
pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes;
|
||||
}
|
||||
|
||||
pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes;
|
||||
}
|
||||
}
|
||||
} else if (functionId == TSDB_FUNC_TWA) {
|
||||
SPoint point1 = (SPoint){.key = prevTs, .val = &v1};
|
||||
|
@ -1596,6 +1609,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
|
|||
SResultRow* pResult = NULL;
|
||||
int32_t forwardStep = 0;
|
||||
int32_t ret = 0;
|
||||
STimeWindow preWin = win;
|
||||
|
||||
while (1) {
|
||||
// null data, failed to allocate more memory buffer
|
||||
|
@ -1610,12 +1624,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
|
|||
|
||||
// window start(end) key interpolation
|
||||
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
|
||||
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
|
||||
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
|
||||
preWin = win;
|
||||
|
||||
int32_t prevEndPos = (forwardStep - 1) * step + startPos;
|
||||
startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
|
||||
if (startPos < 0) {
|
||||
if (win.skey <= pQueryAttr->window.ekey) {
|
||||
if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) {
|
||||
int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
|
||||
pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
|
@ -1626,7 +1641,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe
|
|||
|
||||
// window start(end) key interpolation
|
||||
doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
|
||||
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
|
||||
doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
|
||||
}
|
||||
|
||||
break;
|
||||
|
@ -2250,30 +2265,30 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
|
|||
|
||||
case OP_Fill: {
|
||||
SOperatorInfo* pInfo = pRuntimeEnv->proot;
|
||||
pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput);
|
||||
pRuntimeEnv->proot = createFillOperatorInfo(pRuntimeEnv, pInfo, pInfo->pExpr, pInfo->numOfOutput, pQueryAttr->multigroupResult);
|
||||
break;
|
||||
}
|
||||
|
||||
case OP_MultiwayMergeSort: {
|
||||
bool groupMix = true;
|
||||
if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) {
|
||||
groupMix = false;
|
||||
}
|
||||
|
||||
pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput,
|
||||
4096, merger, groupMix); // TODO hack it
|
||||
pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, 4096, merger);
|
||||
break;
|
||||
}
|
||||
|
||||
case OP_GlobalAggregate: {
|
||||
case OP_GlobalAggregate: { // If fill operator exists, the result rows of different group can not be in the same SSDataBlock.
|
||||
bool multigroupResult = pQueryAttr->multigroupResult;
|
||||
if (pQueryAttr->multigroupResult) {
|
||||
multigroupResult = (pQueryAttr->fillType == TSDB_FILL_NONE);
|
||||
}
|
||||
|
||||
pRuntimeEnv->proot = createGlobalAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
|
||||
pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo);
|
||||
pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo, multigroupResult);
|
||||
break;
|
||||
}
|
||||
|
||||
case OP_SLimit: {
|
||||
pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3,
|
||||
pQueryAttr->numOfExpr3, merger);
|
||||
int32_t num = pRuntimeEnv->proot->numOfOutput;
|
||||
SExprInfo* pExpr = pRuntimeEnv->proot->pExpr;
|
||||
pRuntimeEnv->proot = createSLimitOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pExpr, num, merger, pQueryAttr->multigroupResult);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -3570,7 +3585,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
|
|||
SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
|
||||
|
||||
int64_t tid = 0;
|
||||
pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
|
||||
pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES);
|
||||
SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid);
|
||||
|
||||
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
|
||||
|
@ -3591,7 +3606,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i
|
|||
// set the timestamp output buffer for top/bottom/diff query
|
||||
int32_t fid = pCtx[i].functionId;
|
||||
if (fid == TSDB_FUNC_TOP || fid == TSDB_FUNC_BOTTOM || fid == TSDB_FUNC_DIFF || fid == TSDB_FUNC_DERIVATIVE) {
|
||||
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
||||
if (i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3619,14 +3634,46 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||
pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows;
|
||||
|
||||
// re-estabilish output buffer pointer.
|
||||
// set the correct pointer after the memory buffer reallocated.
|
||||
int32_t functionId = pBInfo->pCtx[i].functionId;
|
||||
|
||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||
pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
|
||||
if (i > 0) pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void copyTsColoum(SSDataBlock* pRes, SQLFunctionCtx* pCtx, int32_t numOfOutput) {
|
||||
bool needCopyTs = false;
|
||||
int32_t tsNum = 0;
|
||||
char *src = NULL;
|
||||
for (int32_t i = 0; i < numOfOutput; i++) {
|
||||
int32_t functionId = pCtx[i].functionId;
|
||||
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||
needCopyTs = true;
|
||||
if (i > 0 && pCtx[i-1].functionId == TSDB_FUNC_TS_DUMMY){
|
||||
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i - 1); // find ts data
|
||||
src = pColRes->pData;
|
||||
}
|
||||
}else if(functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
tsNum++;
|
||||
}
|
||||
}
|
||||
|
||||
if (!needCopyTs) return;
|
||||
if (tsNum < 2) return;
|
||||
if (src == NULL) return;
|
||||
|
||||
for (int32_t i = 0; i < numOfOutput; i++) {
|
||||
int32_t functionId = pCtx[i].functionId;
|
||||
if(functionId == TSDB_FUNC_TS_DUMMY) {
|
||||
SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, i);
|
||||
memcpy(pColRes->pData, src, pColRes->info.bytes * pRes->info.rows);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3644,8 +3691,6 @@ void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) {
|
||||
for (int32_t j = 0; j < size; ++j) {
|
||||
SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
|
||||
|
@ -3826,7 +3871,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
|
|||
}
|
||||
|
||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
|
||||
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
||||
if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
|
||||
}
|
||||
|
||||
if (!pResInfo->initialized) {
|
||||
|
@ -3887,7 +3932,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
|
|||
|
||||
int32_t functionId = pCtx[i].functionId;
|
||||
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
|
||||
if(i > 0) pCtx[i].ptsOutputBuf = pCtx[i-1].pOutput;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4158,6 +4203,7 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti
|
|||
|
||||
// refactor : extract method
|
||||
SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
|
||||
|
||||
//add condition (pBlock->info.rows >= 1) just to runtime happy
|
||||
if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
|
||||
STimeWindow* w = &pBlock->info.window;
|
||||
|
@@ -4272,15 +4318,15 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
   }
 }
 
-int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutput, int32_t capacity) {
-  void** p = calloc(pFillInfo->numOfCols, POINTER_BYTES);
+int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutput, int32_t capacity, void** p) {
   for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
     SColumnInfoData* pColInfoData = taosArrayGet(pOutput->pDataBlock, i);
-    p[i] = pColInfoData->pData;
+    p[i] = pColInfoData->pData + (pColInfoData->info.bytes * pOutput->info.rows);
   }
 
-  pOutput->info.rows = (int32_t)taosFillResultDataBlock(pFillInfo, p, capacity);
-  tfree(p);
+  int32_t numOfRows = (int32_t)taosFillResultDataBlock(pFillInfo, p, capacity - pOutput->info.rows);
+  pOutput->info.rows += numOfRows;
 
   return pOutput->info.rows;
 }
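Not part of the diff itself: the hunk above removes the per-call calloc/tfree of the column-pointer array `p` from doFillTimeIntervalGapsInResults and lets the caller pass it in; later hunks in this commit allocate it once as `pInfo->p` in createFillOperatorInfo and release it with tfree(pInfo->p) in destroySFillOperatorInfo. A minimal standalone sketch of that caller-owned scratch-buffer pattern, with hypothetical names (FillState, fillStep), looks like this:

#include <stdlib.h>

/* Hypothetical operator state; `scratch` plays the role of pInfo->p above. */
typedef struct {
  int    numOfCols;
  void **scratch;   /* allocated once, reused by every fill step, freed on destroy */
} FillState;

static int fillStateInit(FillState *st, int numOfCols) {
  st->numOfCols = numOfCols;
  st->scratch   = calloc(numOfCols, sizeof(void *));
  return (st->scratch != NULL) ? 0 : -1;
}

/* Each call reuses the caller-owned array instead of calloc/tfree per invocation. */
static void fillStep(FillState *st, void **columnData) {
  for (int i = 0; i < st->numOfCols; ++i) {
    st->scratch[i] = columnData[i];   /* stage output pointers, as the patched function does */
  }
  /* ... produce filled rows through the staged pointers ... */
}

static void fillStateDestroy(FillState *st) {
  free(st->scratch);                  /* mirrors tfree(pInfo->p) in destroySFillOperatorInfo */
  st->scratch = NULL;
}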
@ -5324,11 +5370,12 @@ static void destroyGlobalAggOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SSLimitOperatorInfo *pInfo = (SSLimitOperatorInfo*) param;
|
||||
taosArrayDestroy(pInfo->orderColumnList);
|
||||
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
|
||||
tfree(pInfo->prevRow);
|
||||
}
|
||||
|
||||
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream,
|
||||
SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo) {
|
||||
SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp) {
|
||||
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
|
||||
|
||||
pInfo->resultRowFactor =
|
||||
|
@ -5336,15 +5383,14 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
|
|||
|
||||
pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx
|
||||
|
||||
pInfo->pMerge = param;
|
||||
pInfo->bufCapacity = 4096;
|
||||
pInfo->udfInfo = pUdfInfo;
|
||||
|
||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor);
|
||||
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
|
||||
|
||||
pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
|
||||
pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
|
||||
pInfo->multiGroupResults = groupResultMixedUp;
|
||||
pInfo->pMerge = param;
|
||||
pInfo->bufCapacity = 4096;
|
||||
pInfo->udfInfo = pUdfInfo;
|
||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor);
|
||||
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
|
||||
pInfo->orderColumnList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr);
|
||||
pInfo->groupColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
|
||||
|
||||
// TODO refactor
|
||||
int32_t len = 0;
|
||||
|
@ -5397,17 +5443,15 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
|
|||
}
|
||||
|
||||
SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SExprInfo *pExpr, int32_t numOfOutput,
|
||||
int32_t numOfRows, void *merger, bool groupMix) {
|
||||
int32_t numOfRows, void *merger) {
|
||||
SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
|
||||
|
||||
pInfo->pMerge = merger;
|
||||
pInfo->groupMix = groupMix;
|
||||
pInfo->bufCapacity = numOfRows;
|
||||
|
||||
pInfo->pMerge = merger;
|
||||
pInfo->bufCapacity = numOfRows;
|
||||
pInfo->orderColumnList = getResultGroupCheckColumns(pRuntimeEnv->pQueryAttr);
|
||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
|
||||
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
|
||||
|
||||
{
|
||||
{ // todo extract method to create prev compare buffer
|
||||
int32_t len = 0;
|
||||
for(int32_t i = 0; i < numOfOutput; ++i) {
|
||||
len += pExpr[i].base.colBytes;
|
||||
|
@ -5415,8 +5459,8 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
|
|||
|
||||
int32_t numOfCols = (pInfo->orderColumnList != NULL)? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
|
||||
pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len));
|
||||
int32_t offset = POINTER_BYTES * numOfCols;
|
||||
|
||||
int32_t offset = POINTER_BYTES * numOfCols;
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
|
||||
|
||||
|
@ -5432,7 +5476,8 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx
|
|||
pOperator->status = OP_IN_EXECUTING;
|
||||
pOperator->info = pInfo;
|
||||
pOperator->pRuntimeEnv = pRuntimeEnv;
|
||||
pOperator->numOfOutput = pRuntimeEnv->pQueryAttr->numOfCols;
|
||||
pOperator->numOfOutput = numOfOutput;
|
||||
pOperator->pExpr = pExpr;
|
||||
pOperator->exec = doMultiwayMergeSort;
|
||||
pOperator->cleanup = destroyGlobalAggOperatorInfo;
|
||||
return pOperator;
|
||||
|
@ -5478,8 +5523,7 @@ static SSDataBlock* doSort(void* param, bool* newgroup) {
|
|||
|
||||
// start to flush data into disk and try do multiway merge sort
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -5590,8 +5634,7 @@ static SSDataBlock* doAggregate(void* param, bool* newgroup) {
|
|||
doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
|
||||
}
|
||||
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
|
||||
doSetOperatorCompleted(pOperator);
|
||||
|
||||
finalizeQueryResult(pOperator, pInfo->pCtx, &pInfo->resultRowInfo, pInfo->rowCellInfoOffset);
|
||||
pInfo->pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
|
||||
|
@ -5667,7 +5710,7 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
|
|||
|
||||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->pRes);
|
||||
if (pInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
return pInfo->pRes;
|
||||
|
@ -5708,6 +5751,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
|
|||
|
||||
pRes->info.rows = getNumOfResult(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput);
|
||||
if (pRes->info.rows >= pRuntimeEnv->resultInfo.threshold) {
|
||||
copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
|
||||
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
|
||||
return pRes;
|
||||
}
|
||||
|
@ -5733,8 +5777,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
|
|||
if (*newgroup) {
|
||||
if (pRes->info.rows > 0) {
|
||||
pProjectInfo->existDataBlock = pBlock;
|
||||
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
|
||||
return pInfo->pRes;
|
||||
break;
|
||||
} else { // init output buffer for a new group data
|
||||
for (int32_t j = 0; j < pOperator->numOfOutput; ++j) {
|
||||
aAggs[pInfo->pCtx[j].functionId].xFinalize(&pInfo->pCtx[j]);
|
||||
|
@ -5764,7 +5807,7 @@ static SSDataBlock* doProjectOperation(void* param, bool* newgroup) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
copyTsColoum(pRes, pInfo->pCtx, pOperator->numOfOutput);
|
||||
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
|
||||
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
}
|
||||
|
@ -5785,8 +5828,7 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
|
|||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -5814,8 +5856,7 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) {
|
|||
pBlock->info.rows = (int32_t)(pInfo->limit - pInfo->total);
|
||||
pInfo->total = pInfo->limit;
|
||||
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
} else {
|
||||
pInfo->total += pBlock->info.rows;
|
||||
}
|
||||
|
@ -5850,8 +5891,7 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
|
|||
}
|
||||
}
|
||||
|
||||
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -5866,9 +5906,8 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
|
|||
SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
|
||||
|
||||
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
return pIntervalInfo->pRes;
|
||||
|
@ -5909,7 +5948,7 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
|
|||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
|
||||
|
||||
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
|
||||
|
@ -5928,7 +5967,7 @@ static SSDataBlock* doAllIntervalAgg(void* param, bool* newgroup) {
|
|||
toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
|
||||
|
||||
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
return pIntervalInfo->pRes;
|
||||
|
@ -5986,9 +6025,10 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
|
|||
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
|
||||
if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
SQInfo* pQInfo = pRuntimeEnv->qinfo;
|
||||
|
@ -6348,19 +6388,13 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) {
|
|||
return pInfo->binfo.pRes;
|
||||
}
|
||||
|
||||
static SSDataBlock* doFill(void* param, bool* newgroup) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*) param;
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SFillOperatorInfo *pInfo = pOperator->info;
|
||||
SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
|
||||
static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo *pInfo, SQueryRuntimeEnv *pRuntimeEnv, bool *newgroup) {
|
||||
if (taosFillHasMoreResults(pInfo->pFillInfo)) {
|
||||
*newgroup = false;
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
|
||||
return pInfo->pRes;
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity, pInfo->p);
|
||||
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult)) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// handle the cached new group data block
|
||||
|
@ -6372,11 +6406,47 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
|
|||
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
|
||||
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
|
||||
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
|
||||
pInfo->existNewGroupBlock = NULL;
|
||||
*newgroup = true;
|
||||
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* doFill(void* param, bool* newgroup) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*) param;
|
||||
|
||||
SFillOperatorInfo *pInfo = pOperator->info;
|
||||
pInfo->pRes->info.rows = 0;
|
||||
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SQueryRuntimeEnv *pRuntimeEnv = pOperator->pRuntimeEnv;
|
||||
doHandleRemainBlockFromNewGroup(pInfo, pRuntimeEnv, newgroup);
|
||||
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || (!pInfo->multigroupResult && pInfo->pRes->info.rows > 0)) {
|
||||
return pInfo->pRes;
|
||||
}
|
||||
// if (taosFillHasMoreResults(pInfo->pFillInfo)) {
|
||||
// *newgroup = false;
|
||||
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
|
||||
// return pInfo->pRes;
|
||||
// }
|
||||
//
|
||||
// // handle the cached new group data block
|
||||
// if (pInfo->existNewGroupBlock) {
|
||||
// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
|
||||
// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
|
||||
// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
|
||||
//
|
||||
// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
|
||||
// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
|
||||
//
|
||||
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
|
||||
// pInfo->existNewGroupBlock = NULL;
|
||||
// *newgroup = true;
|
||||
// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
// }
|
||||
|
||||
while(1) {
|
||||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
|
||||
|
@ -6391,8 +6461,8 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
|
|||
pInfo->existNewGroupBlock = pBlock;
|
||||
*newgroup = false;
|
||||
|
||||
// fill the previous group data block
|
||||
// before handle a new data block, close the fill operation for previous group data block
|
||||
// Fill the previous group data block, before handle the data block of new group.
|
||||
// Close the fill operation for previous group data block
|
||||
taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey);
|
||||
} else {
|
||||
if (pBlock == NULL) {
|
||||
|
@ -6404,28 +6474,61 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
|
|||
taosFillSetStartInfo(pInfo->pFillInfo, 0, pRuntimeEnv->pQueryAttr->window.ekey);
|
||||
} else {
|
||||
pInfo->totalInputRows += pBlock->info.rows;
|
||||
|
||||
int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey
|
||||
: */pBlock->info.window.ekey;
|
||||
|
||||
taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, ekey);
|
||||
taosFillSetStartInfo(pInfo->pFillInfo, pBlock->info.rows, pBlock->info.window.ekey);
|
||||
taosFillSetInputDataBlock(pInfo->pFillInfo, pBlock);
|
||||
}
|
||||
}
|
||||
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
|
||||
if (pInfo->pRes->info.rows > 0) { // current group has no more result to return
|
||||
return pInfo->pRes;
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
|
||||
|
||||
// current group has no more result to return
|
||||
if (pInfo->pRes->info.rows > 0) {
|
||||
// 1. The result in current group not reach the threshold of output result, continue
|
||||
// 2. If multiple group results existing in one SSDataBlock is not allowed, return immediately
|
||||
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL || (!pInfo->multigroupResult)) {
|
||||
return pInfo->pRes;
|
||||
}
|
||||
|
||||
doHandleRemainBlockFromNewGroup(pInfo, pRuntimeEnv, newgroup);
|
||||
if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold || pBlock == NULL) {
|
||||
return pInfo->pRes;
|
||||
}
|
||||
|
||||
// if (taosFillHasMoreResults(pInfo->pFillInfo)) {
|
||||
// *newgroup = false;
|
||||
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, (int32_t)pRuntimeEnv->resultInfo.capacity);
|
||||
// return pInfo->pRes;
|
||||
// }
|
||||
//
|
||||
// // handle the cached new group data block
|
||||
// if (pInfo->existNewGroupBlock) {
|
||||
// pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
|
||||
// int64_t ekey = Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED)?pRuntimeEnv->pQueryAttr->window.ekey:pInfo->existNewGroupBlock->info.window.ekey;
|
||||
// taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
|
||||
//
|
||||
// taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
|
||||
// taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
|
||||
//
|
||||
// doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
|
||||
// pInfo->existNewGroupBlock = NULL;
|
||||
// *newgroup = true;
|
||||
//
|
||||
// if (pInfo->pRes->info.rows > pRuntimeEnv->resultInfo.threshold) {
|
||||
// return pInfo->pRes;
|
||||
// }
|
||||
//
|
||||
//// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
// }
|
||||
|
||||
} else if (pInfo->existNewGroupBlock) { // try next group
|
||||
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
|
||||
int64_t ekey = /*Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) ? pRuntimeEnv->pQueryAttr->window.ekey
|
||||
:*/ pInfo->existNewGroupBlock->info.window.ekey;
|
||||
int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey;
|
||||
taosResetFillInfo(pInfo->pFillInfo, pInfo->pFillInfo->start);
|
||||
|
||||
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->existNewGroupBlock->info.rows, ekey);
|
||||
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->existNewGroupBlock);
|
||||
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity);
|
||||
doFillTimeIntervalGapsInResults(pInfo->pFillInfo, pInfo->pRes, pRuntimeEnv->resultInfo.capacity, pInfo->p);
|
||||
pInfo->existNewGroupBlock = NULL;
|
||||
*newgroup = true;
|
||||
|
||||
|
@ -6433,7 +6536,6 @@ static SSDataBlock* doFill(void* param, bool* newgroup) {
|
|||
} else {
|
||||
return NULL;
|
||||
}
|
||||
// return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6534,6 +6636,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
SFillOperatorInfo* pInfo = (SFillOperatorInfo*) param;
|
||||
pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo);
|
||||
pInfo->pRes = destroyOutputBuf(pInfo->pRes);
|
||||
tfree(pInfo->p);
|
||||
}
|
||||
|
||||
static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
|
@ -6877,10 +6980,10 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
|
|||
return pOperator;
|
||||
}
|
||||
|
||||
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
|
||||
int32_t numOfOutput) {
|
||||
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult) {
|
||||
SFillOperatorInfo* pInfo = calloc(1, sizeof(SFillOperatorInfo));
|
||||
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
|
||||
pInfo->multigroupResult = multigroupResult;
|
||||
|
||||
{
|
||||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
|
@ -6895,6 +6998,8 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
|
|||
taosCreateFillInfo(pQueryAttr->order.order, w.skey, 0, (int32_t)pRuntimeEnv->resultInfo.capacity, numOfOutput,
|
||||
pQueryAttr->interval.sliding, pQueryAttr->interval.slidingUnit,
|
||||
(int8_t)pQueryAttr->precision, pQueryAttr->fillType, pColInfo, pRuntimeEnv->qinfo);
|
||||
|
||||
pInfo->p = calloc(pInfo->pFillInfo->numOfCols, POINTER_BYTES);
|
||||
}
|
||||
|
||||
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
|
||||
|
@ -6914,7 +7019,7 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn
|
|||
return pOperator;
|
||||
}
|
||||
|
||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger) {
|
||||
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* pMerger, bool multigroupResult) {
|
||||
SSLimitOperatorInfo* pInfo = calloc(1, sizeof(SSLimitOperatorInfo));
|
||||
|
||||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
|
@ -6922,9 +7027,11 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
|
|||
pInfo->orderColumnList = getResultGroupCheckColumns(pQueryAttr);
|
||||
pInfo->slimit = pQueryAttr->slimit;
|
||||
pInfo->limit = pQueryAttr->limit;
|
||||
|
||||
pInfo->capacity = pRuntimeEnv->resultInfo.capacity;
|
||||
pInfo->threshold = (int64_t)(pInfo->capacity * 0.8);
|
||||
pInfo->currentOffset = pQueryAttr->limit.offset;
|
||||
pInfo->currentGroupOffset = pQueryAttr->slimit.offset;
|
||||
pInfo->currentOffset = pQueryAttr->limit.offset;
|
||||
pInfo->multigroupResult= multigroupResult;
|
||||
|
||||
// TODO refactor
|
||||
int32_t len = 0;
|
||||
|
@ -6932,10 +7039,10 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
|
|||
len += pExpr[i].base.resBytes;
|
||||
}
|
||||
|
||||
int32_t numOfCols = pInfo->orderColumnList != NULL? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
|
||||
int32_t numOfCols = (pInfo->orderColumnList != NULL)? (int32_t) taosArrayGetSize(pInfo->orderColumnList):0;
|
||||
pInfo->prevRow = calloc(1, (POINTER_BYTES * numOfCols + len));
|
||||
int32_t offset = POINTER_BYTES * numOfCols;
|
||||
|
||||
int32_t offset = POINTER_BYTES * numOfCols;
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
pInfo->prevRow[i] = (char*)pInfo->prevRow + offset;
|
||||
|
||||
|
@ -6943,6 +7050,8 @@ SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator
|
|||
offset += pExpr[index->colIndex].base.resBytes;
|
||||
}
|
||||
|
||||
pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
|
||||
|
||||
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
|
||||
|
||||
pOperator->name = "SLimitOperator";
|
||||
|
@ -7127,14 +7236,14 @@ static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* p
|
|||
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j);
|
||||
if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) {
|
||||
SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes};
|
||||
taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
|
||||
taosArrayInsert(pInfo->pDistinctDataInfo, i, &item);
|
||||
}
|
||||
}
|
||||
}
|
||||
pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput);
|
||||
pInfo->buf = calloc(1, pInfo->totalBytes);
|
||||
return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false;
|
||||
}
|
||||
}
|
||||
|
||||
static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) {
|
||||
char *p = pInfo->buf;
|
||||
|
@ -7159,11 +7268,13 @@ static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBl
|
|||
p += strlen(MULTI_KEY_DELIM);
|
||||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
|
||||
SOperatorInfo* pOperator = (SOperatorInfo*) param;
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SDistinctOperatorInfo* pInfo = pOperator->info;
|
||||
SSDataBlock* pRes = pInfo->pRes;
|
||||
|
||||
|
@ -7176,13 +7287,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
|
|||
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
|
||||
|
||||
if (pBlock == NULL) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) {
|
||||
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
// ensure result output buf
|
||||
|
@ -7218,11 +7327,11 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
|
|||
pRes->info.rows += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pRes->info.rows >= pInfo->threshold) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return (pInfo->pRes->info.rows > 0)? pInfo->pRes:NULL;
|
||||
}
|
||||
|
||||
|
@ -7439,12 +7548,15 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) {
|
|||
pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols);
|
||||
pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput);
|
||||
pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols);
|
||||
|
||||
pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen);
|
||||
pQueryMsg->colCondLen = htons(pQueryMsg->colCondLen);
|
||||
|
||||
pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset);
|
||||
pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen);
|
||||
pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks);
|
||||
pQueryMsg->tsBuf.tsOrder = htonl(pQueryMsg->tsBuf.tsOrder);
|
||||
|
||||
pQueryMsg->numOfTags = htonl(pQueryMsg->numOfTags);
|
||||
pQueryMsg->tbnameCondLen = htonl(pQueryMsg->tbnameCondLen);
|
||||
pQueryMsg->secondStageOutput = htonl(pQueryMsg->secondStageOutput);
|
||||
|
|
|
@ -768,60 +768,6 @@ void tColDataQSort(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
|
|||
free(buf);
|
||||
}
|
||||
|
||||
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
|
||||
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
|
||||
|
||||
int32_t bytes = pSchema[index].bytes;
|
||||
int32_t size = bytes + sizeof(int32_t);
|
||||
|
||||
char* buf = calloc(1, size * numOfRows);
|
||||
|
||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||
char* dest = buf + size * i;
|
||||
memcpy(dest, ((char*)pCols[index]) + bytes * i, bytes);
|
||||
*(int32_t*)(dest+bytes) = i;
|
||||
}
|
||||
|
||||
qsort(buf, numOfRows, size, compareFn);
|
||||
|
||||
int32_t prevLength = 0;
|
||||
char* p = NULL;
|
||||
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
int32_t bytes1 = pSchema[i].bytes;
|
||||
|
||||
if (i == index) {
|
||||
for(int32_t j = 0; j < numOfRows; ++j){
|
||||
char* src = buf + (j * size);
|
||||
char* dest = (char*) pCols[i] + (j * bytes1);
|
||||
memcpy(dest, src, bytes1);
|
||||
}
|
||||
} else {
|
||||
// make sure memory buffer is enough
|
||||
if (prevLength < bytes1) {
|
||||
char *tmp = realloc(p, bytes1 * numOfRows);
|
||||
assert(tmp);
|
||||
|
||||
p = tmp;
|
||||
prevLength = bytes1;
|
||||
}
|
||||
|
||||
memcpy(p, pCols[i], bytes1 * numOfRows);
|
||||
|
||||
for(int32_t j = 0; j < numOfRows; ++j){
|
||||
char* dest = (char*) pCols[i] + bytes1 * j;
|
||||
|
||||
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
|
||||
char* src = p + (newPos * bytes1);
|
||||
memcpy(dest, src, bytes1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tfree(buf);
|
||||
tfree(p);
|
||||
}
|
||||
|
||||
/*
|
||||
* deep copy of sschema
|
||||
*/
|
||||
|
@ -1157,3 +1103,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) {
|
|||
destroyColumnModel(pDesc->pColumnModel);
|
||||
tfree(pDesc);
|
||||
}
|
||||
|
||||
void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) {
|
||||
assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols);
|
||||
|
||||
int32_t bytes = pSchema[index].bytes;
|
||||
int32_t size = bytes + sizeof(int32_t);
|
||||
|
||||
char* buf = calloc(1, size * numOfRows);
|
||||
|
||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||
char* dest = buf + size * i;
|
||||
memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes);
|
||||
*(int32_t*)(dest+bytes) = i;
|
||||
}
|
||||
|
||||
qsort(buf, numOfRows, size, compareFn);
|
||||
|
||||
int32_t prevLength = 0;
|
||||
char* p = NULL;
|
||||
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
int32_t bytes1 = pSchema[i].bytes;
|
||||
|
||||
if (i == index) {
|
||||
for(int32_t j = 0; j < numOfRows; ++j){
|
||||
char* src = buf + (j * size);
|
||||
char* dest = ((char*)pCols[i]) + (j * bytes1);
|
||||
memcpy(dest, src, bytes1);
|
||||
}
|
||||
} else {
|
||||
// make sure memory buffer is enough
|
||||
if (prevLength < bytes1) {
|
||||
char *tmp = realloc(p, bytes1 * numOfRows);
|
||||
assert(tmp);
|
||||
|
||||
p = tmp;
|
||||
prevLength = bytes1;
|
||||
}
|
||||
|
||||
memcpy(p, pCols[i], bytes1 * numOfRows);
|
||||
|
||||
for(int32_t j = 0; j < numOfRows; ++j){
|
||||
char* dest = ((char*)pCols[i]) + bytes1 * j;
|
||||
|
||||
int32_t newPos = *(int32_t*)(buf + (j * size) + bytes);
|
||||
char* src = p + (newPos * bytes1);
|
||||
memcpy(dest, src, bytes1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tfree(buf);
|
||||
tfree(p);
|
||||
}
|
||||
|
|
|
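Not part of the diff: the relocated taoscQSort above implements a decorate-sort-undecorate, packing each key with its original row index, sorting that buffer, and then permuting every other column by the recorded indexes. A self-contained sketch of the same idea for two int32_t columns (all names here are illustrative, not TDengine APIs):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Record layout mirrors taoscQSort's temporary buffer: key bytes followed by the row index. */
typedef struct { int32_t key; int32_t idx; } KeyIdx;

static int cmpKeyIdx(const void *a, const void *b) {
  int32_t ka = ((const KeyIdx *)a)->key, kb = ((const KeyIdx *)b)->key;
  return (ka > kb) - (ka < kb);
}

/* Sort `keys` ascending and reorder the companion column `vals` by the same permutation. */
static void sortByColumn(int32_t *keys, int32_t *vals, int32_t numOfRows) {
  KeyIdx  *buf = malloc(sizeof(KeyIdx) * numOfRows);
  int32_t *tmp = malloc(sizeof(int32_t) * numOfRows);
  if (buf == NULL || tmp == NULL) { free(buf); free(tmp); return; }

  for (int32_t i = 0; i < numOfRows; ++i) { buf[i].key = keys[i]; buf[i].idx = i; }
  qsort(buf, numOfRows, sizeof(KeyIdx), cmpKeyIdx);

  memcpy(tmp, vals, sizeof(int32_t) * numOfRows);
  for (int32_t i = 0; i < numOfRows; ++i) {
    keys[i] = buf[i].key;
    vals[i] = tmp[buf[i].idx];   /* pull the companion value from its old position */
  }
  free(buf); free(tmp);
}

int main(void) {
  int32_t ts[]  = {30, 10, 20};
  int32_t val[] = {3, 1, 2};
  sortByColumn(ts, val, 3);
  for (int i = 0; i < 3; ++i) printf("%d -> %d\n", ts[i], val[i]);   /* 10 -> 1, 20 -> 2, 30 -> 3 */
  return 0;
}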
@@ -430,7 +430,7 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput)
     SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
     pFillInfo->pData[i] = pColData->pData;
 
-    if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) {  // copy the tag value to tag value buffer
+    if (TSDB_COL_IS_TAG(pCol->flag)) {  // copy the tag value to tag value buffer
       SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
       assert (pTag->col.colId == pCol->col.colId);
       memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy??
@@ -698,7 +698,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
   }
 
   // fill operator
-  if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
+  if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
     op = OP_Fill;
     taosArrayPush(plan, &op);
   }
@ -766,7 +766,7 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
|
|||
pSqlNode->pSortOrder = pSortOrder;
|
||||
pSqlNode->pWhere = pWhere;
|
||||
pSqlNode->fillType = pFill;
|
||||
pSqlNode->pHaving = pHaving;
|
||||
pSqlNode->pHaving = pHaving;
|
||||
|
||||
if (pLimit != NULL) {
|
||||
pSqlNode->limit = *pLimit;
|
||||
|
|
|
@@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
 static void shrinkBuffer(STSList* ptsData) {
   // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size
   if (ptsData->allocSize >= ptsData->threshold * 2) {
-    ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
-    ptsData->allocSize = MEM_BUF_SIZE;
+    char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE);
+    if(rawBuf) {
+      ptsData->rawBuf = rawBuf;
+      ptsData->allocSize = MEM_BUF_SIZE;
+    }
   }
 }
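Not part of the diff: the shrinkBuffer change above is an instance of the checked-realloc pattern, where realloc's result is stored in a temporary so that an allocation failure neither leaks nor discards the existing buffer. A generic sketch of the same pattern (the helper name shrinkToSize is hypothetical):

#include <stdlib.h>

/* Never assign realloc's result straight back to the only owning pointer;
 * on failure the old buffer and old size are kept and an error is reported. */
static int shrinkToSize(char **buf, size_t *allocSize, size_t target) {
  char *p = realloc(*buf, target);
  if (p == NULL) {
    return -1;
  }
  *buf       = p;
  *allocSize = target;
  return 0;
}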
src/query/src/sql.c: 4164 changed lines (file diff suppressed because it is too large)
|
@@ -18,6 +18,9 @@
 
 #define TSDB_FS_VERSION 0
 
+// ================== TSDB global config
+extern bool tsdbForceKeepFile;
+
 // ================== CURRENT file header info
 typedef struct {
   uint32_t version;  // Current file system version (relating to code)
@@ -110,4 +113,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) {
   return 0;
 }
 
-#endif /* _TD_TSDB_FS_H_ */
+#endif /* _TD_TSDB_FS_H_ */
@ -37,6 +37,7 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
|
|||
static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
|
||||
static int tsdbCreateMeta(STsdbRepo *pRepo);
|
||||
|
||||
// For backward compatibility
|
||||
// ================== CURRENT file header info
|
||||
static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
|
||||
int tlen = 0;
|
||||
|
|
|
@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
|
|||
int32_t numOfColsOfRow1 = 0;
|
||||
|
||||
if (pSchema1 == NULL) {
|
||||
pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1));
|
||||
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1));
|
||||
}
|
||||
if(isRow1DataRow) {
|
||||
numOfColsOfRow1 = schemaNCols(pSchema1);
|
||||
|
@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
|
|||
if(row2) {
|
||||
isRow2DataRow = isDataRow(row2);
|
||||
if (pSchema2 == NULL) {
|
||||
pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2));
|
||||
pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2));
|
||||
}
|
||||
if(isRow2DataRow) {
|
||||
numOfColsOfRow2 = schemaNCols(pSchema2);
|
||||
|
@ -2460,7 +2460,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
|
|||
|
||||
// current file are not overlapped with query time window, ignore remain files
|
||||
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle,
|
||||
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
|
||||
|
@ -3474,18 +3474,19 @@ void filterPrepare(void* expr, void* param) {
|
|||
|
||||
if (pInfo->optr == TSDB_RELATION_IN) {
|
||||
int dummy = -1;
|
||||
SHashObj *pObj = NULL;
|
||||
SHashObj *pObj = NULL;
|
||||
if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false);
|
||||
SArray *arr = (SArray *)(pCond->arr);
|
||||
for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
|
||||
char* p = taosArrayGetP(arr, i);
|
||||
taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy));
|
||||
strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p));
|
||||
taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy));
|
||||
}
|
||||
} else {
|
||||
buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen);
|
||||
}
|
||||
pInfo->q = (char *)pObj;
|
||||
pInfo->q = (char *)pObj;
|
||||
} else if (pCond != NULL) {
|
||||
uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE;
|
||||
if (size < (uint32_t)pSchema->bytes) {
|
||||
|
|
|
@ -25,8 +25,8 @@ extern "C" {
|
|||
#define TSDB_PATTERN_MATCH 0
|
||||
#define TSDB_PATTERN_NOMATCH 1
|
||||
#define TSDB_PATTERN_NOWILDCARDMATCH 2
|
||||
#define TSDB_PATTERN_STRING_MAX_LEN 100
|
||||
#define TSDB_REGEX_STRING_MAX_LEN 128
|
||||
#define TSDB_PATTERN_STRING_DEFAULT_LEN 100
|
||||
#define TSDB_REGEX_STRING_DEFAULT_LEN 128
|
||||
|
||||
#define FLT_COMPAR_TOL_FACTOR 4
|
||||
#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON))
|
||||
|
|
|
@ -32,6 +32,7 @@ char * strnchr(char *haystack, char needle, int32_t len, bool skipquote);
|
|||
char ** strsplit(char *src, const char *delim, int32_t *num);
|
||||
char * strtolower(char *dst, const char *src);
|
||||
char * strntolower(char *dst, const char *src, int32_t n);
|
||||
char * strntolower_s(char *dst, const char *src, int32_t n);
|
||||
int64_t strnatoi(char *num, int32_t len);
|
||||
char * strbetween(char *string, char *begin, char *end);
|
||||
char * paGetToken(char *src, char **token, int32_t *tokenLen);
|
||||
|
|
|
@ -537,7 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
|
|||
pCacheObj->deleting = 1;
|
||||
|
||||
// wait for the refresh thread quit before destroying the cache object.
|
||||
while(atomic_load_8(&pCacheObj->deleting) != 0) {
|
||||
// But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 2 seconds.
|
||||
for (int i = 0; i < 40&&atomic_load_8(&pCacheObj->deleting) != 0; i++) {
|
||||
taosMsleep(50);
|
||||
}
|
||||
|
||||
|
|
|
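Not part of the diff: the taosCacheCleanup change above replaces an unbounded wait on the refresh thread with at most 40 polls of 50 ms (about 2 seconds), because in the DLL case the child thread may already be gone before atexit takes effect. A self-contained sketch of such a bounded wait (the helper name waitForFlagCleared is hypothetical; nanosleep is POSIX):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

/* Poll `flag` until it reaches zero or the deadline of maxIters * sleepMs expires. */
static bool waitForFlagCleared(atomic_char *flag, int maxIters, long sleepMs) {
  struct timespec ts = { sleepMs / 1000, (sleepMs % 1000) * 1000000L };
  for (int i = 0; i < maxIters && atomic_load(flag) != 0; ++i) {
    nanosleep(&ts, NULL);
  }
  return atomic_load(flag) == 0;   /* false means the wait timed out */
}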
@ -145,8 +145,8 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) {
|
|||
}
|
||||
if (FLT_EQUAL(p1, p2)) {
|
||||
return 0;
|
||||
}
|
||||
return FLT_GREATER(p1, p2) ? 1: -1;
|
||||
}
|
||||
return FLT_GREATER(p1, p2) ? 1: -1;
|
||||
}
|
||||
|
||||
int32_t compareFloatValDesc(const void* pLeft, const void* pRight) {
|
||||
|
@ -170,8 +170,8 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) {
|
|||
}
|
||||
if (FLT_EQUAL(p1, p2)) {
|
||||
return 0;
|
||||
}
|
||||
return FLT_GREATER(p1, p2) ? 1: -1;
|
||||
}
|
||||
return FLT_GREATER(p1, p2) ? 1: -1;
|
||||
}
|
||||
|
||||
int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
|
||||
|
@ -181,7 +181,7 @@ int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) {
|
|||
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
|
||||
int32_t len1 = varDataLen(pLeft);
|
||||
int32_t len2 = varDataLen(pRight);
|
||||
|
||||
|
||||
if (len1 != len2) {
|
||||
return len1 > len2? 1:-1;
|
||||
} else {
|
||||
|
@ -230,33 +230,33 @@ int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) {
|
|||
*/
|
||||
int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) {
|
||||
char c, c1;
|
||||
|
||||
|
||||
int32_t i = 0;
|
||||
int32_t j = 0;
|
||||
|
||||
|
||||
while ((c = patterStr[i++]) != 0) {
|
||||
if (c == pInfo->matchAll) { /* Match "*" */
|
||||
|
||||
|
||||
while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) {
|
||||
if (c == pInfo->matchOne && (j > size || str[j++] == 0)) {
|
||||
// empty string, return not match
|
||||
return TSDB_PATTERN_NOWILDCARDMATCH;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (c == 0) {
|
||||
return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */
|
||||
}
|
||||
|
||||
|
||||
char next[3] = {toupper(c), tolower(c), 0};
|
||||
while (1) {
|
||||
size_t n = strcspn(str, next);
|
||||
str += n;
|
||||
|
||||
|
||||
if (str[0] == 0 || (n >= size)) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
|
||||
if (ret != TSDB_PATTERN_NOMATCH) {
|
||||
return ret;
|
||||
|
@ -264,18 +264,19 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat
|
|||
}
|
||||
return TSDB_PATTERN_NOWILDCARDMATCH;
|
||||
}
|
||||
|
||||
|
||||
c1 = str[j++];
|
||||
|
||||
|
||||
if (j <= size) {
|
||||
if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; }
|
||||
if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return TSDB_PATTERN_NOMATCH;
|
||||
}
|
||||
|
||||
|
||||
return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH;
|
||||
}
|
||||
|
||||
|
@ -283,13 +284,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
|
|||
wchar_t c, c1;
|
||||
wchar_t matchOne = L'_'; // "_"
|
||||
wchar_t matchAll = L'%'; // "%"
|
||||
|
||||
|
||||
int32_t i = 0;
|
||||
int32_t j = 0;
|
||||
|
||||
|
||||
while ((c = patterStr[i++]) != 0) {
|
||||
if (c == matchAll) { /* Match "%" */
|
||||
|
||||
|
||||
while ((c = patterStr[i++]) == matchAll || c == matchOne) {
|
||||
if (c == matchOne && (j > size || str[j++] == 0)) {
|
||||
return TSDB_PATTERN_NOWILDCARDMATCH;
|
||||
|
@ -298,33 +299,33 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
|
|||
if (c == 0) {
|
||||
return TSDB_PATTERN_MATCH;
|
||||
}
|
||||
|
||||
|
||||
wchar_t accept[3] = {towupper(c), towlower(c), 0};
|
||||
while (1) {
|
||||
size_t n = wcscspn(str, accept);
|
||||
|
||||
|
||||
str += n;
|
||||
if (str[0] == 0 || (n >= size)) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo);
|
||||
if (ret != TSDB_PATTERN_NOMATCH) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return TSDB_PATTERN_NOWILDCARDMATCH;
|
||||
}
|
||||
|
||||
|
||||
c1 = str[j++];
|
||||
|
||||
|
||||
if (j <= size) {
|
||||
if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return TSDB_PATTERN_NOMATCH;
|
||||
}
|
||||
|
||||
|
@ -401,12 +402,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
|
|||
SPatternCompareInfo pInfo = {'%', '_'};
|
||||
|
||||
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
|
||||
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
|
||||
|
||||
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
|
||||
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
|
||||
|
||||
int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
|
||||
free(pattern);
|
||||
|
||||
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
|
||||
}
|
||||
|
||||
|
@ -455,10 +457,10 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
|
|||
} else { /* normal relational comparFn */
|
||||
comparFn = compareLenPrefixedStr;
|
||||
}
|
||||
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
if (optr == TSDB_RELATION_MATCH) {
|
||||
comparFn = compareStrRegexComp;
|
||||
|
@ -481,13 +483,13 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) {
|
|||
comparFn = compareInt32Val;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
return comparFn;
|
||||
}
|
||||
|
||||
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
|
||||
__compar_fn_t comparFn = NULL;
|
||||
|
||||
|
||||
switch (keyType) {
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
|
@ -531,7 +533,7 @@ __compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) {
|
|||
comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
return comparFn;
|
||||
}
|
||||
|
||||
|
@ -564,7 +566,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
|
|||
default: { // todo refactor
|
||||
tstr* t1 = (tstr*) f1;
|
||||
tstr* t2 = (tstr*) f2;
|
||||
|
||||
|
||||
if (t1->len != t2->len) {
|
||||
return t1->len > t2->len? 1:-1;
|
||||
} else {
|
||||
|
|
|
@@ -112,6 +112,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT,       "SQL statement too lon
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY,              "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR,       "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED,          "No table meta cached")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DUP_COL_NAMES,           "duplicated column names")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TAG_LENGTH,      "Invalid tag length")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_COLUMN_LENGTH,   "Invalid column length")

// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED,       "Message not processed")

@@ -194,6 +197,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC,            "Invalid func")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_BUFSIZE,    "Invalid func bufSize")

TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TAG_LENGTH,      "invalid tag length")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_COLUMN_LENGTH,   "invalid column length")

TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED,         "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST,        "Database already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DB_OPTION,       "Invalid database options")

@@ -671,7 +671,7 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen,
    taosNetCheckSpeed(host, port, pkgLen, pkgNum, strtolower(type, pkgType));
  }else if (0 == strcmp("fqdn", role)) {
    taosNetTestFqdn(host);
  }else {
  } else {
    taosNetTestStartup(host, port);
  }

@@ -138,6 +138,7 @@ static SKeyword keywordTable[] = {
    {"COMMA",        TK_COMMA},
    {"NULL",         TK_NULL},
    {"SELECT",       TK_SELECT},
    {"EVERY",        TK_EVERY},
    {"FROM",         TK_FROM},
    {"VARIABLE",     TK_VARIABLE},
    {"INTERVAL",     TK_INTERVAL},

@@ -64,12 +64,15 @@ int32_t strRmquote(char *z, int32_t len){
  int32_t j = 0;
  for (uint32_t k = 1; k < len - 1; ++k) {
    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
      if (z[k] == '\\' && z[k + 1] == '_') {
        //match '_' self
      } else {
        z[j] = z[k + 1];

        cnt++;
        j++;
        k++;
        continue;
      cnt++;
      j++;
      k++;
      continue;
      }
    }

    z[j] = z[k];

@@ -162,6 +165,8 @@ char *strnchr(char *haystack, char needle, int32_t len, bool skipquote) {
  return NULL;
}

char* strtolower(char *dst, const char *src) {
  int esc = 0;
  char quote = 0, *p = dst, c;

@@ -197,7 +202,7 @@ char* strntolower(char *dst, const char *src, int32_t n) {
    if (n == 0) {
      *p = 0;
      return dst;
    }
  }
  for (c = *src++; n-- > 0; c = *src++) {
    if (esc) {
      esc = 0;

@@ -219,6 +224,26 @@ char* strntolower(char *dst, const char *src, int32_t n) {
  return dst;
}

char* strntolower_s(char *dst, const char *src, int32_t n) {
  char *p = dst, c;

  assert(dst != NULL);
  if (n == 0) {
    return NULL;
  }

  while (n-- > 0) {
    c = *src;
    if (c >= 'A' && c <= 'Z') {
      c -= 'A' - 'a';
    }
    *p++ = c;
    src++;
  }

  return dst;
}

char *paGetToken(char *string, char **token, int32_t *tokenLen) {
  char quote = 0;

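The newly added strntolower_s is the simple fixed-length variant: it lowercases exactly n bytes, returns NULL when n is 0, and unlike strntolower it does not appear to track quotes or escape sequences. A small usage sketch, assuming the declaring header (here guessed as tutil.h) is on the include path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "tutil.h"   /* strntolower_s (assumed declaring header) */

int main(void) {
  const char *src = "SELECT 'ABC'";
  char dst[32] = {0};

  /* Lowercases every byte, including the quoted literal that the
   * quote-aware strntolower would preserve. */
  if (strntolower_s(dst, src, (int32_t)strlen(src)) != NULL) {
    printf("%s\n", dst);   /* select 'abc' */
  }
  return 0;
}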
@@ -70,7 +70,7 @@ void doubleSkipListTest() {
}

void randKeyTest() {
  SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT),
  SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC),
                                         false, getkey);

  int32_t size = 200000;

@@ -540,7 +540,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch

    pWal->version = pHead->version;

    //wInfo("writeFp: %ld", offset);
    // wInfo("writeFp: %ld", offset);
    if (0 != walSMemRowCheck(pHead)) {
      wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64,
             pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset);

@@ -61,7 +61,7 @@ int main(int argc, char* argv[]) {

  time_t ct = time(0);
  int64_t ts = ct * 1000;
  char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";
  char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";

  char** lines = calloc(numSuperTables * numChildTables * numRowsPerChildTable, sizeof(char*));
  int l = 0;

@@ -75,7 +75,7 @@ int main(int argc, char* argv[]) {
      }
    }
  }
  shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);
  //shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);

  printf("%s\n", "begin taos_insert_lines");
  int64_t begin = getTimeInUs();

@@ -83,119 +83,5 @@
|
|||
int64_t end = getTimeInUs();
|
||||
printf("code: %d, %s. time used: %"PRId64"\n", code, tstrerror(code), end-begin);
|
||||
|
||||
char* lines_000_0[] = {
|
||||
"sta1,id=sta1_1,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_000_0 , sizeof(lines_000_0)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_000_0 should return error\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_000_1[] = {
|
||||
"sta2,id=\"sta2_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_000_1 , sizeof(lines_000_1)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_000_1 should return error\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_000_2[] = {
|
||||
"sta3,id=\"sta3_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_000_2 , sizeof(lines_000_2)/sizeof(char*));
|
||||
if (0 != code) {
|
||||
printf("taos_insert_lines() lines_000_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_001_0[] = {
|
||||
"sta4,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us",
|
||||
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_001_0 , sizeof(lines_001_0)/sizeof(char*));
|
||||
if (0 != code) {
|
||||
printf("taos_insert_lines() lines_001_0 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_001_1[] = {
|
||||
"sta5,id=\"sta5_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_001_1 , sizeof(lines_001_1)/sizeof(char*));
|
||||
if (0 != code) {
|
||||
printf("taos_insert_lines() lines_001_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_001_2[] = {
|
||||
"sta6,id=\"sta6_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_001_2 , sizeof(lines_001_2)/sizeof(char*));
|
||||
if (0 != code) {
|
||||
printf("taos_insert_lines() lines_001_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_002[] = {
|
||||
"stb,id=\"stb_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000000ns",
|
||||
"stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639019us",
|
||||
"stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833640ms",
|
||||
"stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006834s"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_002 , sizeof(lines_002)/sizeof(char*));
|
||||
if (0 != code) {
|
||||
printf("taos_insert_lines() lines_002 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
//Duplicate key check;
|
||||
char* lines_003_1[] = {
|
||||
"std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_003_2[] = {
|
||||
"std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_003_3[] = {
|
||||
"std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char* lines_003_4[] = {
|
||||
"std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
|
||||
};
|
||||
|
||||
code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*));
|
||||
if (0 == code) {
|
||||
printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
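The test file above drives taos_insert_lines with InfluxDB-style line protocol, where the trailing timestamp carries its precision as a suffix (s, ms, us, ns) and 0 means "server current time". A minimal client sketch follows; the host, user, password, database name and the example line are placeholders, and the target database is assumed to already exist:

#include <stdio.h>
#include "taos.h"

int main(void) {
  /* Placeholder connection parameters; the database "test" is assumed to exist. */
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (taos == NULL) {
    printf("failed to connect\n");
    return -1;
  }

  /* One measurement with a tag set, a field set, and a nanosecond-suffixed timestamp. */
  char *lines[] = {
    "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns"
  };

  int code = taos_insert_lines(taos, lines, sizeof(lines) / sizeof(char *));
  printf("taos_insert_lines: %d (%s)\n", code, tstrerror(code));

  taos_close(taos);
  return 0;
}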
@@ -0,0 +1,429 @@
|
|||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
|
||||
|
||||
#define RECV_MAX_LINE 2048
|
||||
#define ITEM_MAX_LINE 128
|
||||
#define REQ_MAX_LINE 2048
|
||||
#define REQ_CLI_COUNT 100
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
uninited,
|
||||
connecting,
|
||||
connected,
|
||||
datasent
|
||||
} conn_stat;
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
false,
|
||||
true
|
||||
} bool;
|
||||
|
||||
|
||||
typedef unsigned short u16_t;
|
||||
typedef unsigned int u32_t;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int sockfd;
|
||||
int index;
|
||||
conn_stat state;
|
||||
size_t nsent;
|
||||
size_t nrecv;
|
||||
size_t nlen;
|
||||
bool error;
|
||||
bool success;
|
||||
struct sockaddr_in serv_addr;
|
||||
} socket_ctx;
|
||||
|
||||
|
||||
int set_nonblocking(int sockfd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
|
||||
if (ret == -1) {
|
||||
printf("failed to fcntl for %d\r\n", sockfd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (ip == NULL || port == 0 || pctx == NULL) {
|
||||
printf("invalid parameter\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
if (pctx->sockfd == -1) {
|
||||
printf("failed to create socket\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
|
||||
|
||||
pctx->serv_addr.sin_family = AF_INET;
|
||||
pctx->serv_addr.sin_port = htons(port);
|
||||
|
||||
ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
|
||||
if (ret <= 0) {
|
||||
printf("inet_pton error, ip: %s\r\n", ip);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = set_nonblocking(pctx->sockfd);
|
||||
if (ret == -1) {
|
||||
printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pctx->sockfd;
|
||||
}
|
||||
|
||||
|
||||
void close_sockets(socket_ctx *pctx, int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (pctx == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (pctx[i].sockfd > 0) {
|
||||
close(pctx[i].sockfd);
|
||||
pctx[i].sockfd = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int proc_pending_error(socket_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
socklen_t len;
|
||||
|
||||
if (ctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
len = sizeof(int);
|
||||
|
||||
ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
|
||||
if (ret == -1) {
|
||||
err = errno;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
printf("failed to connect at index: %d\r\n", ctx->index);
|
||||
|
||||
close(ctx->sockfd);
|
||||
ctx->sockfd = -1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
|
||||
{
|
||||
char req_line[ITEM_MAX_LINE];
|
||||
char req_host[ITEM_MAX_LINE];
|
||||
char req_cont_type[ITEM_MAX_LINE];
|
||||
char req_cont_len[ITEM_MAX_LINE];
|
||||
const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
|
||||
|
||||
if (ip == NULL || port == 0 ||
|
||||
url == NULL || url[0] == '\0' ||
|
||||
sql == NULL || sql[0] == '\0' ||
|
||||
req_buf == NULL || len <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
|
||||
snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
|
||||
snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
|
||||
snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
|
||||
|
||||
snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
|
||||
}
|
||||
|
||||
|
||||
int add_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int mod_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int del_event(int epfd, int sockfd)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.events = 0;
|
||||
evs_op.data.ptr = NULL;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int main()
|
||||
{
|
||||
int i;
|
||||
int ret, n, nsent, nrecv;
|
||||
int epfd;
|
||||
u32_t events;
|
||||
char *str;
|
||||
socket_ctx *pctx, ctx[REQ_CLI_COUNT];
|
||||
char *ip = "127.0.0.1";
|
||||
char *url = "/rest/sql";
|
||||
u16_t port = 6041;
|
||||
struct epoll_event evs[REQ_CLI_COUNT];
|
||||
char sql[REQ_MAX_LINE];
|
||||
char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
|
||||
char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
|
||||
int count;
|
||||
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ctx[i].sockfd = -1;
|
||||
ctx[i].index = i;
|
||||
ctx[i].state = uninited;
|
||||
ctx[i].nsent = 0;
|
||||
ctx[i].nrecv = 0;
|
||||
ctx[i].error = false;
|
||||
ctx[i].success = false;
|
||||
|
||||
memset(sql, 0, REQ_MAX_LINE);
|
||||
memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
memset(recv_buf[i], 0, RECV_MAX_LINE);
|
||||
|
||||
snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i);
|
||||
build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
|
||||
ctx[i].nlen = strlen(send_buf[i]);
|
||||
}
|
||||
|
||||
epfd = epoll_create(REQ_CLI_COUNT);
|
||||
if (epfd <= 0) {
|
||||
printf("failed to create epoll\r\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = create_socket(ip, port, &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to create socket ar %d\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
events = EPOLLET | EPOLLIN | EPOLLOUT;
|
||||
ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to add sockfd at %d to epoll\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
count = 0;
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
|
||||
if (ret == -1) {
|
||||
if (errno != EINPROGRESS) {
|
||||
printf("connect error, index: %d\r\n", ctx[i].index);
|
||||
(void) del_event(epfd, ctx[i].sockfd);
|
||||
close(ctx[i].sockfd);
|
||||
ctx[i].sockfd = -1;
|
||||
} else {
|
||||
ctx[i].state = connecting;
|
||||
count++;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx[i].state = connected;
|
||||
count++;
|
||||
}
|
||||
|
||||
printf("clients: %d\r\n", count);
|
||||
|
||||
while (count > 0) {
|
||||
n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
|
||||
if (n == -1) {
|
||||
if (errno != EINTR) {
|
||||
printf("epoll_wait error, reason: %s\r\n", strerror(errno));
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < n; i++) {
|
||||
if (evs[i].events & EPOLLERR) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("event error, index: %d\r\n", pctx->index);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
} else if (evs[i].events & EPOLLIN) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
|
||||
if (nrecv == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
} else if (nrecv == 0) {
|
||||
printf("peer closed connection, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
break;
|
||||
}
|
||||
|
||||
pctx->nrecv += nrecv;
|
||||
if (pctx->nrecv > 12) {
|
||||
if (pctx->error == false && pctx->success == false) {
|
||||
str = recv_buf[pctx->index] + 9;
|
||||
if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
|
||||
printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
|
||||
pctx->error = true;
|
||||
} else {
|
||||
printf("response ok, index: %d\r\n", pctx->index);
|
||||
pctx->success = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (evs[i].events & EPOLLOUT) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
|
||||
if (nsent == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to send, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (nsent == (int) (pctx->nlen - pctx->nsent)) {
|
||||
printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
|
||||
|
||||
pctx->state = datasent;
|
||||
|
||||
events = EPOLLET | EPOLLIN;
|
||||
(void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
|
||||
|
||||
break;
|
||||
} else {
|
||||
pctx->nsent += nsent;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
failed:
|
||||
|
||||
if (epfd > 0) {
|
||||
close(epfd);
|
||||
}
|
||||
|
||||
close_sockets(ctx, REQ_CLI_COUNT);
|
||||
|
||||
return 0;
|
||||
}
|
|
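For reference, build_http_request in the file above concatenates a request line, a HOST header, a fixed Basic-auth header (root:taosdata, base64-encoded), a text/plain content type, and a Content-Length header in front of the SQL body. A self-contained sketch that assembles the same shape for one of the statements used in this test (the buffer size is illustrative):

#include <stdio.h>
#include <string.h>

int main(void) {
  const char *sql = "create database if not exists db0 precision 'us'";
  char req[1024];

  /* Same layout as build_http_request: request line, HOST, auth, content type, length, body. */
  snprintf(req, sizeof(req),
           "POST /rest/sql HTTP/1.1\r\n"
           "HOST: 127.0.0.1:6041\r\n"
           "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"
           "Content-Type: text/plain\r\n"
           "Content-Length: %zu\r\n\r\n"
           "%s",
           strlen(sql), sql);

  printf("%s\n", req);
  return 0;
}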
@@ -0,0 +1,433 @@
|
|||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
|
||||
|
||||
#define RECV_MAX_LINE 2048
|
||||
#define ITEM_MAX_LINE 128
|
||||
#define REQ_MAX_LINE 2048
|
||||
#define REQ_CLI_COUNT 100
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
uninited,
|
||||
connecting,
|
||||
connected,
|
||||
datasent
|
||||
} conn_stat;
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
false,
|
||||
true
|
||||
} bool;
|
||||
|
||||
|
||||
typedef unsigned short u16_t;
|
||||
typedef unsigned int u32_t;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int sockfd;
|
||||
int index;
|
||||
conn_stat state;
|
||||
size_t nsent;
|
||||
size_t nrecv;
|
||||
size_t nlen;
|
||||
bool error;
|
||||
bool success;
|
||||
struct sockaddr_in serv_addr;
|
||||
} socket_ctx;
|
||||
|
||||
|
||||
int set_nonblocking(int sockfd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
|
||||
if (ret == -1) {
|
||||
printf("failed to fcntl for %d\r\n", sockfd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (ip == NULL || port == 0 || pctx == NULL) {
|
||||
printf("invalid parameter\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
if (pctx->sockfd == -1) {
|
||||
printf("failed to create socket\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
|
||||
|
||||
pctx->serv_addr.sin_family = AF_INET;
|
||||
pctx->serv_addr.sin_port = htons(port);
|
||||
|
||||
ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
|
||||
if (ret <= 0) {
|
||||
printf("inet_pton error, ip: %s\r\n", ip);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = set_nonblocking(pctx->sockfd);
|
||||
if (ret == -1) {
|
||||
printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pctx->sockfd;
|
||||
}
|
||||
|
||||
|
||||
void close_sockets(socket_ctx *pctx, int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (pctx == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (pctx[i].sockfd > 0) {
|
||||
close(pctx[i].sockfd);
|
||||
pctx[i].sockfd = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int proc_pending_error(socket_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
socklen_t len;
|
||||
|
||||
if (ctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
len = sizeof(int);
|
||||
|
||||
ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
|
||||
if (ret == -1) {
|
||||
err = errno;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
printf("failed to connect at index: %d\r\n", ctx->index);
|
||||
|
||||
close(ctx->sockfd);
|
||||
ctx->sockfd = -1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
|
||||
{
|
||||
char req_line[ITEM_MAX_LINE];
|
||||
char req_host[ITEM_MAX_LINE];
|
||||
char req_cont_type[ITEM_MAX_LINE];
|
||||
char req_cont_len[ITEM_MAX_LINE];
|
||||
const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
|
||||
|
||||
if (ip == NULL || port == 0 ||
|
||||
url == NULL || url[0] == '\0' ||
|
||||
sql == NULL || sql[0] == '\0' ||
|
||||
req_buf == NULL || len <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
|
||||
snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
|
||||
snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
|
||||
snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
|
||||
|
||||
snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
|
||||
}
|
||||
|
||||
|
||||
int add_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int mod_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int del_event(int epfd, int sockfd)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.events = 0;
|
||||
evs_op.data.ptr = NULL;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int main()
|
||||
{
|
||||
int i;
|
||||
int ret, n, nsent, nrecv;
|
||||
int epfd;
|
||||
u32_t events;
|
||||
char *str;
|
||||
socket_ctx *pctx, ctx[REQ_CLI_COUNT];
|
||||
char *ip = "127.0.0.1";
|
||||
char *url_prefix = "/rest/sql";
|
||||
char url[ITEM_MAX_LINE];
|
||||
u16_t port = 6041;
|
||||
struct epoll_event evs[REQ_CLI_COUNT];
|
||||
char sql[REQ_MAX_LINE];
|
||||
char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
|
||||
char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
|
||||
int count;
|
||||
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ctx[i].sockfd = -1;
|
||||
ctx[i].index = i;
|
||||
ctx[i].state = uninited;
|
||||
ctx[i].nsent = 0;
|
||||
ctx[i].nrecv = 0;
|
||||
ctx[i].error = false;
|
||||
ctx[i].success = false;
|
||||
|
||||
memset(url, 0, ITEM_MAX_LINE);
|
||||
memset(sql, 0, REQ_MAX_LINE);
|
||||
memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
memset(recv_buf[i], 0, RECV_MAX_LINE);
|
||||
|
||||
snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
|
||||
snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i);
|
||||
|
||||
build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
|
||||
ctx[i].nlen = strlen(send_buf[i]);
|
||||
}
|
||||
|
||||
epfd = epoll_create(REQ_CLI_COUNT);
|
||||
if (epfd <= 0) {
|
||||
printf("failed to create epoll\r\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = create_socket(ip, port, &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to create socket, index: %d\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
events = EPOLLET | EPOLLIN | EPOLLOUT;
|
||||
ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to add sockfd to epoll, index: %d\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
count = 0;
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
|
||||
if (ret == -1) {
|
||||
if (errno != EINPROGRESS) {
|
||||
printf("connect error, index: %d\r\n", ctx[i].index);
|
||||
(void) del_event(epfd, ctx[i].sockfd);
|
||||
close(ctx[i].sockfd);
|
||||
ctx[i].sockfd = -1;
|
||||
} else {
|
||||
ctx[i].state = connecting;
|
||||
count++;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx[i].state = connected;
|
||||
count++;
|
||||
}
|
||||
|
||||
printf("clients: %d\r\n", count);
|
||||
|
||||
while (count > 0) {
|
||||
n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
|
||||
if (n == -1) {
|
||||
if (errno != EINTR) {
|
||||
printf("epoll_wait error, reason: %s\r\n", strerror(errno));
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < n; i++) {
|
||||
if (evs[i].events & EPOLLERR) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("event error, index: %d\r\n", pctx->index);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
} else if (evs[i].events & EPOLLIN) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
|
||||
if (nrecv == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
} else if (nrecv == 0) {
|
||||
printf("peer closed connection, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
break;
|
||||
}
|
||||
|
||||
pctx->nrecv += nrecv;
|
||||
if (pctx->nrecv > 12) {
|
||||
if (pctx->error == false && pctx->success == false) {
|
||||
str = recv_buf[pctx->index] + 9;
|
||||
if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
|
||||
printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
|
||||
pctx->error = true;
|
||||
} else {
|
||||
printf("response ok, index: %d\r\n", pctx->index);
|
||||
pctx->success = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (evs[i].events & EPOLLOUT) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
|
||||
if (nsent == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to send, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (nsent == (int) (pctx->nlen - pctx->nsent)) {
|
||||
printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
|
||||
|
||||
pctx->state = datasent;
|
||||
|
||||
events = EPOLLET | EPOLLIN;
|
||||
(void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
|
||||
|
||||
break;
|
||||
} else {
|
||||
pctx->nsent += nsent;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
failed:
|
||||
|
||||
if (epfd > 0) {
|
||||
close(epfd);
|
||||
}
|
||||
|
||||
close_sockets(ctx, REQ_CLI_COUNT);
|
||||
|
||||
return 0;
|
||||
}
|
|
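All of these REST clients use the same non-blocking connect pattern: set O_NONBLOCK, treat EINPROGRESS as "connect in flight", wait for the socket to become writable, then read SO_ERROR to learn whether the connect actually succeeded (which is what proc_pending_error does). A stripped-down standalone sketch of just that handshake, using select instead of epoll for brevity; the address and port are placeholders:

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
  struct sockaddr_in addr;
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd == -1) return 1;

  /* Non-blocking socket: connect() returns immediately. */
  fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = htons(6041);                       /* placeholder port */
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);   /* placeholder address */

  int ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
  if (ret == 0) {
    printf("connected immediately\n");
  } else if (errno == EINPROGRESS) {
    fd_set wfds;
    FD_ZERO(&wfds);
    FD_SET(fd, &wfds);
    if (select(fd + 1, NULL, &wfds, NULL, NULL) > 0) {
      int err = 0;
      socklen_t len = sizeof(err);
      getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);   /* 0 means the connect completed */
      printf("connect result: %s\n", err ? strerror(err) : "ok");
    }
  } else {
    printf("connect failed: %s\n", strerror(errno));
  }

  close(fd);
  return 0;
}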
@@ -0,0 +1,433 @@
|
|||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
|
||||
|
||||
#define RECV_MAX_LINE 2048
|
||||
#define ITEM_MAX_LINE 128
|
||||
#define REQ_MAX_LINE 2048
|
||||
#define REQ_CLI_COUNT 100
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
uninited,
|
||||
connecting,
|
||||
connected,
|
||||
datasent
|
||||
} conn_stat;
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
false,
|
||||
true
|
||||
} bool;
|
||||
|
||||
|
||||
typedef unsigned short u16_t;
|
||||
typedef unsigned int u32_t;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int sockfd;
|
||||
int index;
|
||||
conn_stat state;
|
||||
size_t nsent;
|
||||
size_t nrecv;
|
||||
size_t nlen;
|
||||
bool error;
|
||||
bool success;
|
||||
struct sockaddr_in serv_addr;
|
||||
} socket_ctx;
|
||||
|
||||
|
||||
int set_nonblocking(int sockfd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
|
||||
if (ret == -1) {
|
||||
printf("failed to fcntl for %d\r\n", sockfd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (ip == NULL || port == 0 || pctx == NULL) {
|
||||
printf("invalid parameter\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
if (pctx->sockfd == -1) {
|
||||
printf("failed to create socket\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
|
||||
|
||||
pctx->serv_addr.sin_family = AF_INET;
|
||||
pctx->serv_addr.sin_port = htons(port);
|
||||
|
||||
ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
|
||||
if (ret <= 0) {
|
||||
printf("inet_pton error, ip: %s\r\n", ip);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = set_nonblocking(pctx->sockfd);
|
||||
if (ret == -1) {
|
||||
printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pctx->sockfd;
|
||||
}
|
||||
|
||||
|
||||
void close_sockets(socket_ctx *pctx, int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (pctx == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (pctx[i].sockfd > 0) {
|
||||
close(pctx[i].sockfd);
|
||||
pctx[i].sockfd = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int proc_pending_error(socket_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
socklen_t len;
|
||||
|
||||
if (ctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
len = sizeof(int);
|
||||
|
||||
ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
|
||||
if (ret == -1) {
|
||||
err = errno;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
printf("failed to connect at index: %d\r\n", ctx->index);
|
||||
|
||||
close(ctx->sockfd);
|
||||
ctx->sockfd = -1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
|
||||
{
|
||||
char req_line[ITEM_MAX_LINE];
|
||||
char req_host[ITEM_MAX_LINE];
|
||||
char req_cont_type[ITEM_MAX_LINE];
|
||||
char req_cont_len[ITEM_MAX_LINE];
|
||||
const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
|
||||
|
||||
if (ip == NULL || port == 0 ||
|
||||
url == NULL || url[0] == '\0' ||
|
||||
sql == NULL || sql[0] == '\0' ||
|
||||
req_buf == NULL || len <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
|
||||
snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
|
||||
snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
|
||||
snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
|
||||
|
||||
snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
|
||||
}
|
||||
|
||||
|
||||
int add_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int mod_event(int epfd, int sockfd, u32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int del_event(int epfd, int sockfd)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.events = 0;
|
||||
evs_op.data.ptr = NULL;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int main()
|
||||
{
|
||||
int i;
|
||||
int ret, n, nsent, nrecv;
|
||||
int epfd;
|
||||
u32_t events;
|
||||
char *str;
|
||||
socket_ctx *pctx, ctx[REQ_CLI_COUNT];
|
||||
char *ip = "127.0.0.1";
|
||||
char *url_prefix = "/rest/sql";
|
||||
char url[ITEM_MAX_LINE];
|
||||
u16_t port = 6041;
|
||||
struct epoll_event evs[REQ_CLI_COUNT];
|
||||
char sql[REQ_MAX_LINE];
|
||||
char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
|
||||
char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
|
||||
int count;
|
||||
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ctx[i].sockfd = -1;
|
||||
ctx[i].index = i;
|
||||
ctx[i].state = uninited;
|
||||
ctx[i].nsent = 0;
|
||||
ctx[i].nrecv = 0;
|
||||
ctx[i].error = false;
|
||||
ctx[i].success = false;
|
||||
|
||||
memset(url, 0, ITEM_MAX_LINE);
|
||||
memset(sql, 0, REQ_MAX_LINE);
|
||||
memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
memset(recv_buf[i], 0, RECV_MAX_LINE);
|
||||
|
||||
snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
|
||||
snprintf(sql, REQ_MAX_LINE, "drop database if exists db%d", i);
|
||||
|
||||
build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
|
||||
ctx[i].nlen = strlen(send_buf[i]);
|
||||
}
|
||||
|
||||
epfd = epoll_create(REQ_CLI_COUNT);
|
||||
if (epfd <= 0) {
|
||||
printf("failed to create epoll\r\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = create_socket(ip, port, &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to create socket, index: %d\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
events = EPOLLET | EPOLLIN | EPOLLOUT;
|
||||
ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
|
||||
if (ret == -1) {
|
||||
printf("failed to add sockfd to epoll, index: %d\r\n", i);
|
||||
goto failed;
|
||||
}
|
||||
}
|
||||
|
||||
count = 0;
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
|
||||
if (ret == -1) {
|
||||
if (errno != EINPROGRESS) {
|
||||
printf("connect error, index: %d\r\n", ctx[i].index);
|
||||
(void) del_event(epfd, ctx[i].sockfd);
|
||||
close(ctx[i].sockfd);
|
||||
ctx[i].sockfd = -1;
|
||||
} else {
|
||||
ctx[i].state = connecting;
|
||||
count++;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx[i].state = connected;
|
||||
count++;
|
||||
}
|
||||
|
||||
printf("clients: %d\r\n", count);
|
||||
|
||||
while (count > 0) {
|
||||
n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
|
||||
if (n == -1) {
|
||||
if (errno != EINTR) {
|
||||
printf("epoll_wait error, reason: %s\r\n", strerror(errno));
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < n; i++) {
|
||||
if (evs[i].events & EPOLLERR) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("event error, index: %d\r\n", pctx->index);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
} else if (evs[i].events & EPOLLIN) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
|
||||
if (nrecv == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
} else if (nrecv == 0) {
|
||||
printf("peer closed connection, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
break;
|
||||
}
|
||||
|
||||
pctx->nrecv += nrecv;
|
||||
if (pctx->nrecv > 12) {
|
||||
if (pctx->error == false && pctx->success == false) {
|
||||
str = recv_buf[pctx->index] + 9;
|
||||
if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
|
||||
printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
|
||||
pctx->error = true;
|
||||
} else {
|
||||
printf("response ok, index: %d\r\n", pctx->index);
|
||||
pctx->success = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (evs[i].events & EPOLLOUT) {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
if (pctx->state == connecting) {
|
||||
ret = proc_pending_error(pctx);
|
||||
if (ret == 0) {
|
||||
printf("client connected, index: %d\r\n", pctx->index);
|
||||
pctx->state = connected;
|
||||
} else {
|
||||
printf("client connect failed, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
for ( ;; ) {
|
||||
nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
|
||||
if (nsent == -1) {
|
||||
if (errno != EAGAIN && errno != EINTR) {
|
||||
printf("failed to send, index: %d\r\n", pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (nsent == (int) (pctx->nlen - pctx->nsent)) {
|
||||
printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);
|
||||
|
||||
pctx->state = datasent;
|
||||
|
||||
events = EPOLLET | EPOLLIN;
|
||||
(void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);
|
||||
|
||||
break;
|
||||
} else {
|
||||
pctx->nsent += nsent;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pctx = (socket_ctx *) evs[i].data.ptr;
|
||||
printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
|
||||
(void) del_event(epfd, pctx->sockfd);
|
||||
close(pctx->sockfd);
|
||||
pctx->sockfd = -1;
|
||||
count--;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
failed:
|
||||
|
||||
if (epfd > 0) {
|
||||
close(epfd);
|
||||
}
|
||||
|
||||
close_sockets(ctx, REQ_CLI_COUNT);
|
||||
|
||||
return 0;
|
||||
}
|
|
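Because the sockets are registered with EPOLLET, each readiness notification is delivered only once, so these tests drain recv/send in inner for(;;) loops until EAGAIN before returning to epoll_wait. A minimal sketch of that drain pattern for the read side; fd is assumed to be a non-blocking socket that epoll has just reported readable:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/* Illustrative edge-triggered drain: keep reading until the kernel buffer is empty
 * (EAGAIN/EWOULDBLOCK) or the peer closes; fd is a non-blocking, readable socket. */
static int drain_socket(int fd, char *buf, size_t cap) {
  size_t total = 0;
  for (;;) {
    ssize_t n = recv(fd, buf + total, cap - total, 0);
    if (n > 0) {
      total += (size_t)n;
      if (total == cap) break;          /* buffer full; caller decides what to do next */
    } else if (n == 0) {
      break;                            /* peer closed the connection */
    } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
      break;                            /* drained: wait for the next EPOLLIN */
    } else if (errno != EINTR) {
      return -1;                        /* hard error */
    }
  }
  return (int)total;
}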
@@ -0,0 +1,455 @@
|
|||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <unistd.h>
|
||||
#include <inttypes.h>
|
||||
#include <fcntl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/time.h>
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
|
||||
|
||||
#define RECV_MAX_LINE 2048
|
||||
#define ITEM_MAX_LINE 128
|
||||
#define REQ_MAX_LINE 4096
|
||||
#define REQ_CLI_COUNT 100
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
uninited,
|
||||
connecting,
|
||||
connected,
|
||||
datasent
|
||||
} conn_stat;
|
||||
|
||||
|
||||
typedef enum
|
||||
{
|
||||
false,
|
||||
true
|
||||
} bool;
|
||||
|
||||
|
||||
typedef struct
|
||||
{
|
||||
int sockfd;
|
||||
int index;
|
||||
conn_stat state;
|
||||
size_t nsent;
|
||||
size_t nrecv;
|
||||
size_t nlen;
|
||||
bool error;
|
||||
bool success;
|
||||
struct sockaddr_in serv_addr;
|
||||
} socket_ctx;
|
||||
|
||||
|
||||
int set_nonblocking(int sockfd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
|
||||
if (ret == -1) {
|
||||
printf("failed to fcntl for %d\r\n", sockfd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (ip == NULL || port == 0 || pctx == NULL) {
|
||||
printf("invalid parameter\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
if (pctx->sockfd == -1) {
|
||||
printf("failed to create socket\r\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));
|
||||
|
||||
pctx->serv_addr.sin_family = AF_INET;
|
||||
pctx->serv_addr.sin_port = htons(port);
|
||||
|
||||
ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
|
||||
if (ret <= 0) {
|
||||
printf("inet_pton error, ip: %s\r\n", ip);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = set_nonblocking(pctx->sockfd);
|
||||
if (ret == -1) {
|
||||
printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pctx->sockfd;
|
||||
}
|
||||
|
||||
|
||||
void close_sockets(socket_ctx *pctx, int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (pctx == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (pctx[i].sockfd > 0) {
|
||||
close(pctx[i].sockfd);
|
||||
pctx[i].sockfd = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int proc_pending_error(socket_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
socklen_t len;
|
||||
|
||||
if (ctx == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = 0;
|
||||
len = sizeof(int);
|
||||
|
||||
ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
|
||||
if (ret == -1) {
|
||||
err = errno;
|
||||
}
|
||||
|
||||
if (err) {
|
||||
printf("failed to connect at index: %d\r\n", ctx->index);
|
||||
|
||||
close(ctx->sockfd);
|
||||
ctx->sockfd = -1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
|
||||
{
|
||||
char req_line[ITEM_MAX_LINE];
|
||||
char req_host[ITEM_MAX_LINE];
|
||||
char req_cont_type[ITEM_MAX_LINE];
|
||||
char req_cont_len[ITEM_MAX_LINE];
|
||||
const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";
|
||||
|
||||
if (ip == NULL || port == 0 ||
|
||||
url == NULL || url[0] == '\0' ||
|
||||
sql == NULL || sql[0] == '\0' ||
|
||||
req_buf == NULL || len <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
|
||||
snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
|
||||
snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
|
||||
snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));
|
||||
|
||||
snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
|
||||
}
|
||||
|
||||
|
||||
int add_event(int epfd, int sockfd, uint32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int mod_event(int epfd, int sockfd, uint32_t events, void *data)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.data.ptr = data;
|
||||
evs_op.events = events;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int del_event(int epfd, int sockfd)
|
||||
{
|
||||
struct epoll_event evs_op;
|
||||
|
||||
evs_op.events = 0;
|
||||
evs_op.data.ptr = NULL;
|
||||
|
||||
return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
|
||||
}
|
||||
|
||||
|
||||
int main()
|
||||
{
|
||||
int i;
|
||||
int ret, n, nsent, nrecv, offset;
|
||||
int epfd;
|
||||
uint32_t events;
|
||||
char *str;
|
||||
socket_ctx *pctx, ctx[REQ_CLI_COUNT];
|
||||
char *ip = "127.0.0.1";
|
||||
char *url_prefix = "/rest/sql";
|
||||
char url[ITEM_MAX_LINE];
|
||||
uint16_t port = 6041;
|
||||
struct epoll_event evs[REQ_CLI_COUNT];
|
||||
struct timeval now;
|
||||
int64_t start_time;
|
||||
char sql[REQ_MAX_LINE];
|
||||
char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
|
||||
char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
|
||||
int count;
|
||||
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
gettimeofday(&now, NULL);
|
||||
start_time = now.tv_sec * 1000000 + now.tv_usec;
|
||||
|
||||
for (i = 0; i < REQ_CLI_COUNT; i++) {
|
||||
ctx[i].sockfd = -1;
|
||||
ctx[i].index = i;
|
||||
ctx[i].state = uninited;
|
||||
ctx[i].nsent = 0;
|
||||
ctx[i].nrecv = 0;
|
||||
ctx[i].error = false;
|
||||
ctx[i].success = false;
|
||||
|
||||
memset(url, 0, ITEM_MAX_LINE);
|
||||
memset(sql, 0, REQ_MAX_LINE);
|
||||
memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
|
||||
memset(recv_buf[i], 0, RECV_MAX_LINE);
|
||||
|
||||
snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);
|
||||
|
||||
offset = 0;
|
||||
|
||||
ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i);
|
||||
if (ret <= 0) {
|
||||
printf("failed to snprintf for sql(prefix), index: %d\r\n ", i);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
offset += ret;
|
||||
|
||||
while (offset < REQ_MAX_LINE - 128) {
|
||||
            ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i);
            if (ret <= 0) {
                printf("failed to snprintf for sql(values), index: %d\r\n ", i);
                goto failed;
            }

            offset += ret;
        }

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}
@ -0,0 +1,432 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/time.h>
#include <errno.h>
#include <signal.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 4096
#define REQ_CLI_COUNT 100


typedef enum
{
    uninited,
    connecting,
    connected,
    datasent
} conn_stat;


typedef enum
{
    false,
    true
} bool;


typedef struct
{
    int sockfd;
    int index;
    conn_stat state;
    size_t nsent;
    size_t nrecv;
    size_t nlen;
    bool error;
    bool success;
    struct sockaddr_in serv_addr;
} socket_ctx;


int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


int proc_pending_error(socket_ctx *ctx)
{
    int ret;
    int err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


int add_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


int mod_event(int epfd, int sockfd, uint32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}


int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd;
    uint32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url_prefix = "/rest/sql";
    char url[ITEM_MAX_LINE];
    uint16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    signal(SIGPIPE, SIG_IGN);

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;

        memset(url, 0, ITEM_MAX_LINE);
        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i);

        snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i);

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}
@ -0,0 +1,430 @@
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <errno.h>
#include <signal.h>


#define RECV_MAX_LINE 2048
#define ITEM_MAX_LINE 128
#define REQ_MAX_LINE 2048
#define REQ_CLI_COUNT 100


typedef enum
{
    uninited,
    connecting,
    connected,
    datasent
} conn_stat;


typedef enum
{
    false,
    true
} bool;


typedef unsigned short u16_t;
typedef unsigned int u32_t;


typedef struct
{
    int sockfd;
    int index;
    conn_stat state;
    size_t nsent;
    size_t nrecv;
    size_t nlen;
    bool error;
    bool success;
    struct sockaddr_in serv_addr;
} socket_ctx;


int set_nonblocking(int sockfd)
{
    int ret;

    ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
    if (ret == -1) {
        printf("failed to fcntl for %d\r\n", sockfd);
        return ret;
    }

    return ret;
}


int create_socket(const char *ip, const u16_t port, socket_ctx *pctx)
{
    int ret;

    if (ip == NULL || port == 0 || pctx == NULL) {
        printf("invalid parameter\r\n");
        return -1;
    }

    pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (pctx->sockfd == -1) {
        printf("failed to create socket\r\n");
        return -1;
    }

    bzero(&pctx->serv_addr, sizeof(struct sockaddr_in));

    pctx->serv_addr.sin_family = AF_INET;
    pctx->serv_addr.sin_port = htons(port);

    ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr);
    if (ret <= 0) {
        printf("inet_pton error, ip: %s\r\n", ip);
        return -1;
    }

    ret = set_nonblocking(pctx->sockfd);
    if (ret == -1) {
        printf("failed to set %d as nonblocking\r\n", pctx->sockfd);
        return -1;
    }

    return pctx->sockfd;
}


void close_sockets(socket_ctx *pctx, int cnt)
{
    int i;

    if (pctx == NULL) {
        return;
    }

    for (i = 0; i < cnt; i++) {
        if (pctx[i].sockfd > 0) {
            close(pctx[i].sockfd);
            pctx[i].sockfd = -1;
        }
    }
}


int proc_pending_error(socket_ctx *ctx)
{
    int ret;
    int err;
    socklen_t len;

    if (ctx == NULL) {
        return 0;
    }

    err = 0;
    len = sizeof(int);

    ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
    if (ret == -1) {
        err = errno;
    }

    if (err) {
        printf("failed to connect at index: %d\r\n", ctx->index);

        close(ctx->sockfd);
        ctx->sockfd = -1;

        return -1;
    }

    return 0;
}


void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len)
{
    char req_line[ITEM_MAX_LINE];
    char req_host[ITEM_MAX_LINE];
    char req_cont_type[ITEM_MAX_LINE];
    char req_cont_len[ITEM_MAX_LINE];
    const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n";

    if (ip == NULL || port == 0 ||
        url == NULL || url[0] == '\0' ||
        sql == NULL || sql[0] == '\0' ||
        req_buf == NULL || len <= 0)
    {
        return;
    }

    snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url);
    snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port);
    snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain");
    snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql));

    snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql);
}


int add_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events = events;

    return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op);
}


int mod_event(int epfd, int sockfd, u32_t events, void *data)
{
    struct epoll_event evs_op;

    evs_op.data.ptr = data;
    evs_op.events = events;

    return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op);
}


int del_event(int epfd, int sockfd)
{
    struct epoll_event evs_op;

    evs_op.events = 0;
    evs_op.data.ptr = NULL;

    return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op);
}


int main()
{
    int i;
    int ret, n, nsent, nrecv;
    int epfd;
    u32_t events;
    char *str;
    socket_ctx *pctx, ctx[REQ_CLI_COUNT];
    char *ip = "127.0.0.1";
    char *url = "/rest/sql";
    u16_t port = 6041;
    struct epoll_event evs[REQ_CLI_COUNT];
    char sql[REQ_MAX_LINE];
    char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE];
    char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE];
    int count;

    signal(SIGPIPE, SIG_IGN);

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ctx[i].sockfd = -1;
        ctx[i].index = i;
        ctx[i].state = uninited;
        ctx[i].nsent = 0;
        ctx[i].nrecv = 0;
        ctx[i].error = false;
        ctx[i].success = false;

        memset(sql, 0, REQ_MAX_LINE);
        memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE);
        memset(recv_buf[i], 0, RECV_MAX_LINE);

        snprintf(sql, REQ_MAX_LINE, "use db%d", i);

        build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE);

        ctx[i].nlen = strlen(send_buf[i]);
    }

    epfd = epoll_create(REQ_CLI_COUNT);
    if (epfd <= 0) {
        printf("failed to create epoll\r\n");
        goto failed;
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = create_socket(ip, port, &ctx[i]);
        if (ret == -1) {
            printf("failed to create socket, index: %d\r\n", i);
            goto failed;
        }
    }

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        events = EPOLLET | EPOLLIN | EPOLLOUT;
        ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]);
        if (ret == -1) {
            printf("failed to add sockfd to epoll, index: %d\r\n", i);
            goto failed;
        }
    }

    count = 0;

    for (i = 0; i < REQ_CLI_COUNT; i++) {
        ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr));
        if (ret == -1) {
            if (errno != EINPROGRESS) {
                printf("connect error, index: %d\r\n", ctx[i].index);
                (void) del_event(epfd, ctx[i].sockfd);
                close(ctx[i].sockfd);
                ctx[i].sockfd = -1;
            } else {
                ctx[i].state = connecting;
                count++;
            }

            continue;
        }

        ctx[i].state = connected;
        count++;
    }

    printf("clients: %d\r\n", count);

    while (count > 0) {
        n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2);
        if (n == -1) {
            if (errno != EINTR) {
                printf("epoll_wait error, reason: %s\r\n", strerror(errno));
                break;
            }
        } else {
            for (i = 0; i < n; i++) {
                if (evs[i].events & EPOLLERR) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("event error, index: %d\r\n", pctx->index);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                } else if (evs[i].events & EPOLLIN) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0);
                        if (nrecv == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno));
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        } else if (nrecv == 0) {
                            printf("peer closed connection, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;
                            break;
                        }

                        pctx->nrecv += nrecv;
                        if (pctx->nrecv > 12) {
                            if (pctx->error == false && pctx->success == false) {
                                str = recv_buf[pctx->index] + 9;
                                if (str[0] != '2' || str[1] != '0' || str[2] != '0') {
                                    printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]);
                                    pctx->error = true;
                                } else {
                                    printf("response ok, index: %d\r\n", pctx->index);
                                    pctx->success = true;
                                }
                            }
                        }
                    }
                } else if (evs[i].events & EPOLLOUT) {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    if (pctx->state == connecting) {
                        ret = proc_pending_error(pctx);
                        if (ret == 0) {
                            printf("client connected, index: %d\r\n", pctx->index);
                            pctx->state = connected;
                        } else {
                            printf("client connect failed, index: %d\r\n", pctx->index);
                            (void) del_event(epfd, pctx->sockfd);
                            close(pctx->sockfd);
                            pctx->sockfd = -1;
                            count--;

                            continue;
                        }
                    }

                    for ( ;; ) {
                        nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0);
                        if (nsent == -1) {
                            if (errno != EAGAIN && errno != EINTR) {
                                printf("failed to send, index: %d\r\n", pctx->index);
                                (void) del_event(epfd, pctx->sockfd);
                                close(pctx->sockfd);
                                pctx->sockfd = -1;
                                count--;
                            }

                            break;
                        }

                        if (nsent == (int) (pctx->nlen - pctx->nsent)) {
                            printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index);

                            pctx->state = datasent;

                            events = EPOLLET | EPOLLIN;
                            (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx);

                            break;
                        } else {
                            pctx->nsent += nsent;
                        }
                    }
                } else {
                    pctx = (socket_ctx *) evs[i].data.ptr;
                    printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index);
                    (void) del_event(epfd, pctx->sockfd);
                    close(pctx->sockfd);
                    pctx->sockfd = -1;
                    count--;
                }
            }
        }
    }

failed:

    if (epfd > 0) {
        close(epfd);
    }

    close_sockets(ctx, REQ_CLI_COUNT);

    return 0;
}