Merge branch 'develop' into feature/slguan

slguan 2019-12-05 16:50:34 +08:00
commit 6fb9ee3395
31 changed files with 831 additions and 369 deletions

View File

@ -162,7 +162,7 @@ for irow in range(1,11):
sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>Insert data</li>
<li>Query data</li>
<code><pre>
c1.execute('select * from tb')
# fetch the query results

View File

@ -273,29 +273,93 @@ All the error codes and error messages can be found in `TSDBError.java` . For a
## Python Connector
### Install TDengine Python client
### Prerequisites
* TDengine installed; the TDengine client installed if on Windows
* python 2.7 or >= 3.4
* pip installed
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
### Installation
#### Linux
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
```cmd
pip install src/connector/python/[linux|Windows]/python2/
pip install src/connector/python/linux/python3/
```
or
```
pip install src/connector/python/[linux|Windows]/python3/
pip install src/connector/python/linux/python2/
```
#### Windows
Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the Windows _cmd_ command-line interface
```
cd C:\TDengine\connector\python\windows
pip install python3\
```
or
```
cd C:\TDengine\connector\python\windows
pip install python2\
```
*If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use.
If the _pip_ command is not installed on the system, users can either install pip or simply copy the _taos_ directory from the python client directory into the application directory.
### Python client interfaces
To use TDengine Python client, import TDengine module at first:
### Usage
#### Examples
* import TDengine module
```python
import taos
```
* get the connection
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* <em>host</em> is the IP address of the TDengine server, and <em>config</em> is the directory containing the TDengine client configuration file
* insert records into the database
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* query the database
```python
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
```
* close the connection
```python
c1.close()
conn.close()
```
#### Help information
Users can get module information from the Python help interface or refer to our [python code example](). We list the main classes and methods below:
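For example, a minimal way to browse that information interactively is Python's built-in `help()`; the exact classes and methods listed depend on the installed connector version:
```python
import taos

# print the connector's module-level documentation, including its classes and methods
help(taos)

# the objects used in the examples above can be inspected the same way
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
help(conn.cursor())
conn.close()
```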

View File

@ -2,15 +2,15 @@
## Directory Structure
After installing TDengine, the following directories or files are created in the operating system by default:
During the installation of TDengine, the installer creates the following directories or files in the operating system:
| Directory/File | Description |
| ---------------------- | :------------------------------------------------|
| /etc/taos/taos.cfg | TDengine default [configuration file] |
| /usr/local/taos/driver | TDengine dynamic library directory |
| /var/lib/taos | TDengine default data file directory; the location can be changed via the [configuration file]. |
| /var/log/taos | TDengine default log file directory; the location can be changed via the [configuration file] |
| /usr/local/taos/bin | TDengine executable directory |
| /etc/taos/taos.cfg | default [configuration file] |
| /usr/local/taos/driver | dynamic library directory |
| /var/lib/taos | default data file directory; the location can be changed via the [configuration file]. |
| /var/log/taos | default log file directory; the location can be changed via the [configuration file] |
| /usr/local/taos/bin | executable directory |
### Executables
@ -25,22 +25,78 @@ By default, all TDengine executables are stored in the _/usr/local/taos/bin_ directory
## Server Configuration
The TDengine backend service is provided by taosd; its configuration parameters can be modified in the configuration file taos.cfg to suit different scenarios. The default location of the configuration file is the /etc/taos directory, and a different directory can be specified with the -c command line option of taosd, e.g. taosd -c /home/user means the configuration file is located in the /home/user directory.
The TDengine backend service program is `taosd`; by default it reads its configuration file from the `/etc/taos` directory. A different configuration file directory can be specified with the -c command line option, for example:
```
taosd -c /home/user
```
This makes `taosd` read the configuration file taos.cfg from the `/home/user` directory at startup.
Only some of the important configuration parameters are listed below; see the comments in the configuration file for the full list, and the preceding chapters for a detailed description of each parameter. **Note: after a configuration change, the *taosd* service must be restarted for it to take effect.**
- internalIp: the IP address used to provide services; defaults to the first IP address of the system
- mgmtShellPort: TCP/UDP port used for communication between the management node and clients, default 6030. The 5 consecutive ports starting from this port are used for UDP communication, i.e. UDP occupies [6030-6034]; TCP also uses port [6030].
- vnodeShellPort: TCP/UDP port used for communication between data nodes and clients, default 6035. The 5 consecutive ports starting from this port are used for UDP communication, i.e. UDP occupies [6035-6039]; TCP also uses port [6035]
- httpPort: TCP port on which data nodes provide the RESTful service [6020]
- dataDir: data file directory, default /var/lib/taos
- maxUsers: maximum number of users
- maxDbs: maximum number of databases
- maxTables: maximum number of tables
- enableMonitor: system monitoring flag; 0: off, 1: on
- logDir: log file directory, default /var/log/taos
- numOfLogLines: maximum number of lines in a log file
- debugFlag: system debug log switch; 131: errors and warnings only, 135: everything
**internalIp**
- Default: the first IP address in the host's configured IP address list
The IP address on which services are provided.
**mgmtShellPort**
- Default: _6030_
TCP/UDP port used for communication between the management node of the database service and clients.
> The 5 consecutive ports starting from this port are used for UDP communication, i.e. ports _6030_ - _6034_; in addition, TCP uses port _6030_.
**vnodeShellPort**
- Default: _6035_
TCP/UDP port used for communication between data nodes and clients.
> The 5 consecutive ports starting from this port are used for UDP communication, i.e. ports _6035_ - _6039_; in addition, TCP uses port _6035_.
**httpPort**
- Default: _6020_
Port used by the RESTful service; all HTTP (TCP) query/write requests are sent to this port.
**dataDir**
- Default: /var/lib/taos
Data file directory; all data files are written to this directory.
**logDir**
- Default: /var/log/taos
Log file directory; client and server run-time logs are written to this directory.
**maxUsers**
- Default: 10,000
Upper limit on the number of users that can be created.
**maxDbs**
- Default: 1,000
Upper limit on the number of databases that can be created.
**maxTables**
- Default: 650,000
Upper limit on the number of tables that can be created.
> The number of tables that can actually be created is constrained by multiple factors; simply increasing this parameter does not directly increase the number of tables the system can create. For example, since creating each table consumes a certain amount of cache, the total number of tables has a fixed upper bound for a given amount of available memory.
**monitor**
- Default: 1 (enabled)
Internal system monitoring switch. The monitor collects load information of the physical node, including CPU, memory, disk, network bandwidth and HTTP request metrics, and stores the records in the `LOG` database. 0 disables the monitoring service, 1 enables it.
**numOfLogLines**
- Default: 10,000,000
Maximum number of lines allowed in a single log file (10,000,000 lines).
**debugFlag**
- Default: 131 (errors and warnings only)
Run-time log switch for the system (server and client):
- 131: output errors and warnings only
- 135: output errors (ERROR), warnings (WARN) and information (INFO)
Data in different application scenarios often has different characteristics: retention period, number of replicas, collection frequency, record size, number of collection points, compression and so on can all differ. To achieve the best storage efficiency, TDengine provides the following storage-related configuration parameters:
@ -66,19 +122,139 @@ The TDengine backend service is provided by taosd; its parameters can be modified in the configuration file taos.cfg
## Client Configuration
The interactive client application of the TDengine system is taos, which shares the configuration file taos.cfg with taosd. When running taos, the -c option specifies the configuration file directory, e.g. taos -c /home/cfg uses the parameters in taos.cfg under the /home/cfg/ directory; the default directory is /etc/taos. For more ways to use taos, see [Shell command line program](#_TDengine_Shell命令行程序). This section mainly describes the parameters used by the taos client application in the configuration file taos.cfg.
The interactive client application of the TDengine system is taos (taos.exe on Windows). Like the server program, `taos` can be configured through taos.cfg. If no configuration file path is specified at startup, taos reads `taos.cfg` from the default path `/etc/taos/`. The command to start `taos` with a specific configuration file is:
List of client configuration parameters and their explanations
```
taos -c /home/cfg/
```
**Note: what is specified at startup is the directory containing the configuration file, not the configuration file itself.**
- masterIP: the IP address of the server to which the client sends requests by default
- charset: the character set used by the client, default UTF-8. TDengine stores nchar data in unicode, so the client must tell the service which character set it uses, i.e. the character set of the system the client runs on.
- locale: sets the system locale. On Linux this is shared by the client and server.
- defaultUser: default login user, default value root
- defaultPass: default login password, default value taosdata
If there is no configuration file in the `/home/cfg/` directory, the program still starts and prints the following warning:
```markdown
Welcome to the TDengine shell from linux, client version:1.6.4.0
option file:/home/cfg/taos.cfg not found, all options are set to system default
```
For more ways to use taos, see [Shell command line program](#_TDengine_Shell命令行程序). This section mainly describes the parameters the taos client application reads from the configuration file taos.cfg.
The TCP/UDP port and log configuration parameters are exactly the same as those of the server.
Description of the client configuration parameters
When starting taos, the IP address, port, user name and password can also be specified on the command line; otherwise they are read from taos.cfg.
**masterIP**
- Default: 127.0.0.1
IP address of the TDengine server the client connects to; if not set, the client connects to the node at 127.0.0.1 by default. The following two commands are equivalent:
```
taos
taos -h 127.0.0.1
```
The IP address used is the masterIP value read from the configuration file.
**locale**
- Default: obtained dynamically from the system (if automatic detection fails, it must be set in the configuration file or through the API)
TDengine provides a dedicated field type, `nchar`, for storing wide characters in non-ASCII encodings such as Chinese, Japanese and Korean. Data written to an `nchar` field is uniformly encoded as `UCS4-LE` and then sent to the server. Note that **encoding correctness** is guaranteed by the client. Therefore, to store non-ASCII characters such as Chinese, Japanese or Korean in `nchar` fields, the client's encoding must be set correctly.
Characters entered on the client use the current default encoding of the operating system, which is usually `UTF-8` on Linux but may be `GB18030` or `GBK` on some Chinese systems; in a docker environment the default encoding is `POSIX`, and on Chinese Windows it is `CP936`. The client must be configured with the character set it actually uses, i.e. the current encoding of the operating system it runs on, so that data in `nchar` fields is correctly converted to `UCS4-LE`.
On Linux, the naming convention for a locale is:
`<language>_<region>.<charset>`
For example, in `zh_CN.UTF-8`, zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The character set tells the client how to perform encoding conversion when parsing local strings. Linux and Mac OSX determine the system character encoding through the locale; since the locale used by Windows does not follow the POSIX locale format, a separate configuration parameter, `charset`, is used to specify the character encoding on Windows. charset can also be used to specify the character encoding on Linux.
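As an illustration, the sketch below uses the Python connector documented elsewhere in this commit to write a Chinese string into an `nchar` column (the database `demo`, table `t` and the sample value are invented for this example); it only round-trips correctly when the client's locale/charset matches the encoding of the source text:
```python
# -*- coding: utf-8 -*-
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()

c1.execute('create database if not exists demo')
c1.execute('use demo')
# nchar columns hold wide characters; the client converts them to UCS4-LE before sending
c1.execute('create table if not exists t (ts timestamp, note nchar(20))')
c1.execute("insert into t values (now, '北京')")

c1.execute('select * from t')
for row in c1:
    print(row)

c1.close()
conn.close()
```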
**charset**
- Default: obtained dynamically from the system (if automatic detection fails, it must be set in the configuration file or through the API)
If `charset` is not set in the configuration file, taos on Linux automatically reads the current locale at startup and extracts the charset from it. If reading the locale fails, it tries to read the charset setting; if that also fails, **the startup is aborted**.
On Linux the locale already contains the character encoding information, so once the locale is set correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows the current system encoding cannot be obtained from the locale. If no character encoding can be read from the configuration file, `taos` defaults to the character encoding `CP936`, which is equivalent to adding the following to the configuration file:
```
charset CP936
```
To change the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
On Linux, if both the locale and the character encoding charset are set and they are inconsistent, the value set later overrides the one set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
then the effective value of `charset` is `GBK`.
```
charset GBK
locale zh_CN.UTF-8
```
In this case the effective value of `charset` is `UTF-8`.
**sockettype**
- Default: UDP
Type of socket the client uses to connect to the server; both `UDP` and `TCP` are supported.
When the communication between the client and server has to traverse a poor network environment (such as the public Internet), or when the connection between the client and the database server is unstable and UDP packets are dropped because of MTU issues, the socket type can be switched to `TCP`.
> Note: the client socket type must be the same as the server socket type, otherwise the connection to the database cannot be established.
**compressMsgSize**
- Default: -1 (no compression)
Threshold above which messages exchanged between the client and server are compressed; the default value of -1 means no compression. To enable compression, a value of 64330 bytes is recommended, i.e. only message bodies larger than 64330 bytes are compressed. Add the following to the configuration file:
```
compressMsgSize 64330
```
Setting this option to 0 (`compressMsgSize 0`) compresses all messages.
**timezone**
- Default: the current time zone obtained dynamically from the system
Time zone of the system the client runs on. To handle data written and queried across multiple time zones, TDengine uses Unix timestamps ([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time)) to record and store time. The nature of a Unix timestamp guarantees that, at any given moment, the timestamp is identical regardless of time zone. Note that the conversion to a Unix timestamp is done on the client, so the client must be configured with the correct time zone to convert other time representations into correct Unix timestamps.
On Linux the client automatically reads the time zone configured in the system. The time zone can also be set in the configuration file in several formats, for example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid ways of setting the UTC+8 time zone.
The time zone setting affects how non-Unix-timestamp content in SQL statements (timestamp strings and the keyword `now`) is parsed for queries and writes. For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the UTC+8 time zone, the SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone, the SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty introduced by string time formats, Unix timestamps can be used directly. In addition, timestamp strings with an explicit time zone can be used in SQL statements, e.g. an RFC 3339 timestamp string such as `2013-04-12T15:52:01.123+08:00`, or an ISO 8601 timestamp string such as `2013-04-12T15:52:01.123+0800`. The conversion of these two strings to Unix timestamps is not affected by the system time zone.
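The equivalence above can be checked with the Python 3 standard library alone; this short sketch recomputes the two millisecond timestamps for '2019-04-11 12:01:08' interpreted in UTC+8 and in UTC:
```python
from datetime import datetime, timezone, timedelta

local = datetime(2019, 4, 11, 12, 1, 8)  # naive wall-clock time from the SQL string

# interpret the same wall-clock time in UTC+8 and in UTC
ts_utc8 = local.replace(tzinfo=timezone(timedelta(hours=8)))
ts_utc = local.replace(tzinfo=timezone.utc)

# TDengine stores timestamps in milliseconds
print(int(ts_utc8.timestamp() * 1000))  # 1554955268000
print(int(ts_utc.timestamp() * 1000))   # 1554984068000
```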
**defaultUser**
- Default: root
Login user name; if no user name is specified when the client logs in, this one is used automatically. By default, the following two commands are equivalent:
```
taos
taos -u root
```
The user name is the `defaultUser` value read from the configuration. If it is changed to `defaultUser abc`, the following two commands are equivalent:
```
taos
taos -u abc
```
**defaultPass**
- Default: taosdata
Login password; if no password is specified when the client logs in, this one is used automatically. By default, the following two commands are equivalent:
```
taos
taos -ptaosdata
```
The TCP/UDP port and log configuration parameters are exactly the same as those of the server. The command `taos -?` shows the options accepted by `taos`.
## User Management
@ -191,6 +367,6 @@ KILL STREAM <stream-id>
## System Monitoring
After TDengine starts, it automatically creates a monitoring database, SYS, and periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk I/O speed, slow queries and other information into it. TDengine also records important system operations (such as logging in and creating or deleting databases) as well as error and alarm messages in the SYS database. A system administrator can inspect this database directly from the CLI, or view the monitoring information graphically on the web.
After TDengine starts, it automatically creates a monitoring database, `LOG`, and periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk I/O speed, slow queries and other information into it. TDengine also records important system operations (such as logging in and creating or deleting databases) as well as error and alarm messages in the `LOG` database. A system administrator can view the recorded load information through a client program and, in the enterprise edition, view visualized charts of the data through a browser.
Collection of this monitoring information is enabled by default; it can be turned off or on with the enableMonitor option in the configuration file.
Collection of this monitoring information is enabled by default; it can be turned off or on with the `monitor` option in the configuration file.
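As a quick check, the sketch below uses the Python connector documented elsewhere in this commit to list the databases on the server; the `LOG` database created by the monitor should appear in the output (only the generic `show databases` command is used, since the tables inside `LOG` are not described in this section):
```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()

# list all databases; the monitoring database created by taosd should be among them
c1.execute('show databases')
for row in c1:
    print(row[0])

c1.close()
conn.close()
```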

View File

@ -265,26 +265,97 @@ public Connection getConn() throws Exception{
## Python Connector
### Installation Preparation
* TDengine is installed; if the client runs on Windows, the Windows version of the TDengine client is required
* python 2.7 or >= 3.4 is installed
* pip is installed
### Installing the Python Client
#### Linux
The installation packages for python2 and python3 can be found in the src/connector/python folder of the source code. They can be installed with pip:
`pip install src/connector/python/[linux|windows]/python2/`
`pip install src/connector/python/linux/python2/`
`pip install src/connector/python/[linux|windows]/python3/`
If the pip command is not available on the machine, the taos folder under src/connector/python/python3 or src/connector/python/python2 can be copied into the application directory and used directly.
For Windows clients, after installing the TDengine Windows client, copy C:\TDengine\driver\taos.dll into the C:\windows\system32 directory. All TDengine connectors depend on taos.dll.
### Python Client Interface
To use TDengine's python interface, the TDengine client module must be imported first:
`pip install src/connector/python/linux/python3/`
#### Windows
Assuming the Windows TDengine client is installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows <em>cmd</em> command-line interface
```cmd
cd C:\TDengine\connector\python\windows
pip install python2\
```
```cmd
cd C:\TDengine\connector\python\windows
pip install python3\
```
* If the pip command is not available on the machine, the taos folder under src/connector/python/python3 or src/connector/python/python2 can be copied into the application directory and used directly.
For Windows clients, after installing the TDengine Windows client, copy C:\TDengine\driver\taos.dll into the C:\windows\system32 directory.
### Usage
#### Code Examples
* Import the TDengine client module
```python
import taos
```
* Get the connection
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* <em>host</em> is the IP address of the TDengine server, and <em>config</em> is the directory containing the client configuration file
* Insert data
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* Query data
```python
c1.execute('select * from tb')
# fetch the query results
data = c1.fetchall()
# the returned result is a list; each row is one element of the list (a tuple)
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# iterate over the cursor directly to fetch the query results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
```
* Close the connection
```python
c1.close()
conn.close()
```
#### Help Information
Module usage information can be viewed directly through Python's built-in help, or in the sample programs under code/examples/python. Some commonly used classes and methods are listed below:
@ -562,6 +633,142 @@ The taosSql driver package uses cgo to call TDengine's synchronous C/C++ interface; compared with
After the database has been created, you can start creating tables and writing and querying data. The basic approach for all of these operations is to assemble the SQL statement, execute it with db.Exec, and then check for and handle any error. See the sample code above.
## Node.js Connector
TDengine also provides a node.js connector. It can be installed through [npm](https://www.npmjs.com/) or from the source code under *src/connector/nodejs/*. [The detailed installation steps are as follows](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs)
First, install the node.js connector through [npm](https://www.npmjs.com/):
```cmd
npm install td-connector
```
We recommend installing the node.js connector with npm. If npm is not installed, *src/connector/nodejs/* can be copied into your nodejs project directory instead.
To interact with the TDengine server, the connector uses the [node-gyp](https://github.com/nodejs/node-gyp) library. Before installing the node.js connector, the following software must also be installed, depending on the platform (the instructions below are quoted from node-gyp):
### Unix
- `python` (`v2.7` recommended; `v3.x.x` is not yet supported)
- `make`
- a C compiler, such as [GCC](https://gcc.gnu.org)
### macOS
- `python` (`v2.7` recommended; `v3.x.x` is not yet supported)
- Xcode
- then, through Xcode, install
```
Command Line Tools
```
```
Xcode -> Preferences -> Locations
```
is where this tool can be found; alternatively, run in a terminal:
```
xcode-select --install
```
- after this step, `gcc` and `make` are installed
### Windows
#### Installation Method 1
Use Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools): running `npm install --global --production windows-build-tools` in a `cmd` prompt installs all the required tools.
#### Installation Method 2
Install the following tools manually:
- Install the Visual Studio components: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not yet supported) and run `npm config set python python2.7`
- In a `cmd` prompt, run `npm config set msvs_version 2017`
If the steps above cannot be completed successfully, refer to [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
When using ARM64 Node.js on Windows 10 ARM, you also need to add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
### Usage
(http://docs.taosdata.com/node)
Below are some basic usage examples of the node.js connector; detailed usage is documented in [this document](http://docs.taosdata.com/node)
#### Connection
To use the node.js connector, first require ```td-connector```, then call the ```taos.connect``` function. ```taos.connect``` requires the ```host``` parameter; any other parameter that is not provided falls back to the defaults shown below. Finally, initialize a ```cursor``` to communicate with the TDengine server.
```javascript
const taos = require('td-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var cursor = conn.cursor(); // Initializing a new cursor
```
To close the connection, run:
```javascript
conn.close();
```
#### Query
The database can be queried with the ```cursor.query``` function.
```javascript
var query = cursor.query('show databases;')
```
The query result can be retrieved and printed with the ```query.execute()``` function:
```javascript
var promise = query.execute();
promise.then(function(result) {
result.pretty();
});
```
Query statements can also be parameterized with the ```bind``` method of ```query```. In the example below, ```query``` automatically fills the provided values into the ```?``` placeholders of the query statement.
```javascript
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
query.execute().then(function(result) {
result.pretty();
})
```
If a second argument set to ```true``` is passed to ```query```, the query result is returned immediately, as follows:
```javascript
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
promise.then(function(result) {
result.pretty();
})
```
#### Asynchronous Functions
Asynchronous database queries work like the examples above; just append `_a` to functions such as `cursor.execute` and `TaosQuery.execute`.
```javascript
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
promise1.then(function(result) {
result.pretty();
})
promise2.then(function(result) {
result.pretty();
})
```
### Examples
[Here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) is a code example that uses the NodeJS connector to create a table, insert weather data and query the inserted data.
[Here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) is a similar example that creates a table, inserts weather data and queries it with the NodeJS connector, except that it uses only the `cursor`.
## CSharp Connector
On Windows, C# applications can use TDengine's native C interface to perform all database operations; an ORM (dapper) framework driver will be provided in a later release.
@ -672,6 +879,3 @@ The APIs TDengine provides on Windows are the same as on Linux; applications
+ Put the Windows development package (taos.dll) into the system32 directory.

View File

@ -100,6 +100,9 @@
# default system charset
# charset UTF-8
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# enable/disable commit log
# clog 1

View File

@ -17,7 +17,6 @@ top_dir="$(readlink -m ${script_dir}/../..)"
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
community_dir="${script_dir}/../../../community/src"
#package_name='linux'
install_dir="${release_dir}/TDengine-client-${version}"
@ -25,7 +24,7 @@ install_dir="${release_dir}/TDengine-client-${version}"
# Directories and files.
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${community_dir}/inc/taos.h ${community_dir}/inc/taoserror.h"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install_client.sh"
@ -55,7 +54,7 @@ mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${community_dir}/connector"
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafana ${install_dir}/connector/

View File

@ -107,14 +107,6 @@ void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t
void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex);
//TODO refactor, remove
void SStringFree(SString* str);
void SStringCopy(SString* pDest, const SString* pSrc);
SString SStringCreate(const char* str);
int32_t SStringAlloc(SString* pStr, int32_t size);
int32_t SStringEnsureRemain(SString* pStr, int32_t size);
int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex);
void tscClearInterpInfo(SSqlCmd* pCmd);
@ -226,7 +218,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIn
void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex);
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid);
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid);
TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int),
void* param, void** taos);

View File

@ -188,7 +188,7 @@ typedef struct SString {
typedef struct SCond {
uint64_t uid;
SString cond;
char* cond;
} SCond;
typedef struct SJoinNode {

View File

@ -51,7 +51,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
}
int32_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string too long");
tscQueueAsyncError(fp, param);
return;

View File

@ -307,7 +307,7 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0);
assert(pNew->cmd.tagCond.joinInfo.hasJoin);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param[0].i64Key = tagColIndex;
pExpr->numOfParams = 1;

View File

@ -21,6 +21,7 @@
#include "taosmsg.h"
#include "tstoken.h"
#include "ttime.h"
#include "tstrbuild.h"
#include "tscUtil.h"
#include "tschemautil.h"
@ -3103,26 +3104,23 @@ static int32_t optrToString(tSQLExpr* pExpr, char** exprString) {
return TSDB_CODE_SUCCESS;
}
static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) {
static int32_t tablenameListToString(tSQLExpr* pExpr, /*char* str*/SStringBuilder* sb) {
tSQLExprList* pList = pExpr->pParam;
if (pList->nExpr <= 0) {
return TSDB_CODE_INVALID_SQL;
}
if (pList->nExpr > 0) {
strcpy(str, QUERY_COND_REL_PREFIX_IN);
str += QUERY_COND_REL_PREFIX_IN_LEN;
taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
}
int32_t len = 0;
for (int32_t i = 0; i < pList->nExpr; ++i) {
tSQLExpr* pSub = pList->a[i].pNode;
strncpy(str + len, pSub->val.pz, pSub->val.nLen);
len += pSub->val.nLen;
taosStringBuilderAppendStringLen(sb, pSub->val.pz, pSub->val.nLen);
if (i < pList->nExpr - 1) {
str[len++] = TBNAME_LIST_SEP[0];
taosStringBuilderAppendString(sb, TBNAME_LIST_SEP);
}
if (pSub->val.nLen <= 0 || pSub->val.nLen > TSDB_METER_NAME_LEN) {
@ -3133,11 +3131,9 @@ static int32_t tablenameListToString(tSQLExpr* pExpr, char* str) {
return TSDB_CODE_SUCCESS;
}
static int32_t tablenameCondToString(tSQLExpr* pExpr, char* str) {
strcpy(str, QUERY_COND_REL_PREFIX_LIKE);
str += strlen(QUERY_COND_REL_PREFIX_LIKE);
strcpy(str, pExpr->val.pz);
static int32_t tablenameCondToString(tSQLExpr* pExpr, /*char* str*/SStringBuilder* sb) {
taosStringBuilderAppendStringLen(sb, QUERY_COND_REL_PREFIX_LIKE, QUERY_COND_REL_PREFIX_LIKE_LEN);
taosStringBuilderAppendString(sb, pExpr->val.pz);
return TSDB_CODE_SUCCESS;
}
@ -3241,7 +3237,7 @@ static int32_t getTagCondString(SSqlCmd* pCmd, tSQLExpr* pExpr, char** str) {
return tSQLExprLeafToString(pExpr, true, str);
}
static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str) {
static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, /*char* str*/SStringBuilder* sb) {
const char* msg0 = "invalid table name list";
if (pTableCond == NULL) {
@ -3258,9 +3254,9 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, tSQLExpr* pTableCond, char* str)
int32_t ret = TSDB_CODE_SUCCESS;
if (pTableCond->nSQLOptr == TK_IN) {
ret = tablenameListToString(pRight, str);
ret = tablenameListToString(pRight, sb);
} else if (pTableCond->nSQLOptr == TK_LIKE) {
ret = tablenameCondToString(pRight, str);
ret = tablenameCondToString(pRight, sb);
}
if (ret != TSDB_CODE_SUCCESS) {
@ -3828,8 +3824,7 @@ int tableNameCompar(const void* lhs, const void* rhs) {
return ret > 0 ? 1 : -1;
}
static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex,
char* tmpTableCondBuf) {
static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) {
SSqlCmd* pCmd = &pSql->cmd;
const char* msg = "meter name too long";
@ -3842,26 +3837,25 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_
STagCond* pTagCond = &pSql->cmd.tagCond;
pTagCond->tbnameCond.uid = pMeterMetaInfo->pMeterMeta->uid;
SString* pTableCond = &pCmd->tagCond.tbnameCond.cond;
SStringAlloc(pTableCond, 4096);
assert(pExpr->nSQLOptr == TK_LIKE || pExpr->nSQLOptr == TK_IN);
if (pExpr->nSQLOptr == TK_LIKE) {
strcpy(pTableCond->z, tmpTableCondBuf);
pTableCond->n = strlen(pTableCond->z);
char* str = taosStringBuilderGetResult(sb, NULL);
pCmd->tagCond.tbnameCond.cond = strdup(str);
return TSDB_CODE_SUCCESS;
}
strcpy(pTableCond->z, QUERY_COND_REL_PREFIX_IN);
pTableCond->n += strlen(QUERY_COND_REL_PREFIX_IN);
SStringBuilder sb1 = {0};
taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
char db[TSDB_METER_ID_LEN] = {0};
// remove the duplicated input table names
int32_t num = 0;
char** segments = strsplit(tmpTableCondBuf + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num);
qsort(segments, num, sizeof(void*), tableNameCompar);
char* tableNameString = taosStringBuilderGetResult(sb, NULL);
char** segments = strsplit(tableNameString + QUERY_COND_REL_PREFIX_IN_LEN, TBNAME_LIST_SEP, &num);
qsort(segments, num, POINTER_BYTES, tableNameCompar);
int32_t j = 1;
for (int32_t i = 1; i < num; ++i) {
@ -3875,25 +3869,30 @@ static int32_t setTableCondForMetricQuery(SSqlObj* pSql, tSQLExpr* pExpr, int16_
char* acc = getAccountId(pSql);
for (int32_t i = 0; i < num; ++i) {
SStringEnsureRemain(pTableCond, TSDB_METER_ID_LEN);
if (i >= 1) {
pTableCond->z[pTableCond->n++] = TBNAME_LIST_SEP[0];
taosStringBuilderAppendStringLen(&sb1, TBNAME_LIST_SEP, 1);
}
char idBuf[TSDB_METER_ID_LEN + 1] = {0};
int32_t xlen = strlen(segments[i]);
SSQLToken t = {.z = segments[i], .n = xlen, .type = TK_STRING};
int32_t ret = setObjFullName(pTableCond->z + pTableCond->n, acc, &dbToken, &t, &xlen);
int32_t ret = setObjFullName(idBuf, acc, &dbToken, &t, &xlen);
if (ret != TSDB_CODE_SUCCESS) {
taosStringBuilderDestroy(&sb1);
tfree(segments);
invalidSqlErrMsg(pCmd, msg);
return ret;
}
pTableCond->n += xlen;
taosStringBuilderAppendString(&sb1, idBuf);
}
char* str = taosStringBuilderGetResult(&sb1, NULL);
pCmd->tagCond.tbnameCond.cond = strdup(str);
taosStringBuilderDestroy(&sb1);
tfree(segments);
return TSDB_CODE_SUCCESS;
}
@ -4071,10 +4070,9 @@ int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr)
SSqlCmd* pCmd = &pSql->cmd;
/*
* tags query condition may be larger than 512bytes,
* therefore, we need to prepare enough large space
* tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space
*/
char tableNameCond[TSDB_MAX_SQL_LEN] = {0};
SStringBuilder sb = {0};
int32_t ret = TSDB_CODE_SUCCESS;
if ((ret = getQueryCondExpr(pCmd, pExpr, condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) {
@ -4119,7 +4117,7 @@ int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr)
}
// 4. get the table name query condition
if ((ret = getTablenameCond(pCmd, condExpr->pTableCond, tableNameCond)) != TSDB_CODE_SUCCESS) {
if ((ret = getTablenameCond(pCmd, condExpr->pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
return ret;
}
@ -4135,7 +4133,10 @@ int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr)
// 7. query condition for table name
pCmd->tagCond.relType = (condExpr->relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR;
ret = setTableCondForMetricQuery(pSql, condExpr->pTableCond, condExpr->tableCondIndex, tableNameCond);
ret = setTableCondForMetricQuery(pSql, condExpr->pTableCond, condExpr->tableCondIndex, &sb);
taosStringBuilderDestroy(&sb);
if (!validateFilterExpr(pCmd)) {
return invalidSqlErrMsg(pCmd, msg);
}
@ -5156,7 +5157,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIn
if (pExpr->functionId != TSDB_FUNC_TAG) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
int16_t columnInfo = tscGetJoinTagColIndexByUid(pCmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t columnInfo = tscGetJoinTagColIndexByUid(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid);
SColumnIndex index = {.tableIndex = 0, .columnIndex = columnInfo};
SSchema* pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);

View File

@ -689,7 +689,7 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId
SSqlExpr *pExpr = tscSqlExprGet(&pNew->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pSupporter->tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param->i64Key = tagColIndex;
pExpr->numOfParams = 1;
@ -2741,10 +2741,14 @@ static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
int32_t n = 0;
for (int32_t i = 0; i < pCmd->tagCond.numOfTagCond; ++i) {
n += pCmd->tagCond.cond[i].cond.n;
n += strlen(pCmd->tagCond.cond[i].cond);
}
int32_t tagLen = n * TSDB_NCHAR_SIZE + pCmd->tagCond.tbnameCond.cond.n * TSDB_NCHAR_SIZE;
int32_t tagLen = n * TSDB_NCHAR_SIZE;
if (pCmd->tagCond.tbnameCond.cond != NULL) {
tagLen += strlen(pCmd->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
}
int32_t joinCondLen = (TSDB_METER_ID_LEN + sizeof(int16_t)) * 2;
int32_t elemSize = sizeof(SMetricMetaElemMsg) * pCmd->numOfTables;
@ -2816,8 +2820,9 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) {
if (pTagCond->numOfTagCond > 0) {
SCond *pCond = tsGetMetricQueryCondPos(pTagCond, uid);
if (pCond != NULL) {
condLen = pCond->cond.n + 1;
bool ret = taosMbsToUcs4(pCond->cond.z, pCond->cond.n, pMsg, pCond->cond.n * TSDB_NCHAR_SIZE);
condLen = strlen(pCond->cond) + 1;
bool ret = taosMbsToUcs4(pCond->cond, condLen, pMsg, condLen * TSDB_NCHAR_SIZE);
if (!ret) {
tscError("%p mbs to ucs4 failed:%s", pSql, tsGetMetricQueryCondPos(pTagCond, uid));
return 0;
@ -2836,15 +2841,17 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) {
offset = pMsg - (char *)pMetaMsg;
pElem->tableCond = htonl(offset);
pElem->tableCondLen = htonl(pTagCond->tbnameCond.cond.n);
uint32_t len = strlen(pTagCond->tbnameCond.cond);
pElem->tableCondLen = htonl(len);
memcpy(pMsg, pTagCond->tbnameCond.cond.z, pTagCond->tbnameCond.cond.n);
pMsg += pTagCond->tbnameCond.cond.n;
memcpy(pMsg, pTagCond->tbnameCond.cond, len);
pMsg += len;
}
SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr;
if (pGroupby->tableIndex != i) {
if (pGroupby->tableIndex != i && pGroupby->numOfGroupCols > 0) {
pElem->orderType = 0;
pElem->orderIndex = 0;
pElem->numOfGroupCols = 0;

View File

@ -270,7 +270,7 @@ int taos_query(TAOS *taos, const char *sqlstr) {
SSqlRes *pRes = &pSql->res;
size_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
pRes->code = tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql
tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
@ -786,7 +786,6 @@ int taos_errno(TAOS *taos) {
char *taos_errstr(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
uint8_t code;
// char temp[256] = {0};
if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode];
@ -797,11 +796,13 @@ char *taos_errstr(TAOS *taos) {
// for invalid sql, additional information is attached to explain why the sql is invalid
if (code == TSDB_CODE_INVALID_SQL) {
// snprintf(temp, tListLen(temp), "invalid SQL: %s", pObj->pSql->cmd.payload);
// strcpy(pObj->pSql->cmd.payload, temp);
return pObj->pSql->cmd.payload;
} else {
return tsError[code];
if (code < 0 || code > TSDB_CODE_MAX_ERROR_CODE) {
return tsError[TSDB_CODE_SUCCESS];
} else {
return tsError[code];
}
}
}
@ -924,7 +925,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
int32_t sqlLen = strlen(sql);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_INVALID_SQL;
return pRes->code;

View File

@ -51,7 +51,6 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) {
assert(len < tListLen(tagIdBuf));
const int32_t maxKeySize = TSDB_MAX_TAGS_LEN; // allowed max key size
char* tmp = calloc(1, TSDB_MAX_SQL_LEN);
SCond* cond = tsGetMetricQueryCondPos(pTagCond, uid);
@ -60,12 +59,24 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) {
sprintf(join, "%s,%s", pTagCond->joinInfo.left.meterId, pTagCond->joinInfo.right.meterId);
}
int32_t keyLen =
snprintf(tmp, TSDB_MAX_SQL_LEN, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name,
(cond != NULL ? cond->cond.z : NULL), pTagCond->tbnameCond.cond.n > 0 ? pTagCond->tbnameCond.cond.z : NULL,
// estimate the buffer size
size_t tbnameCondLen = pTagCond->tbnameCond.cond != NULL? strlen(pTagCond->tbnameCond.cond):0;
size_t redundantLen = 20;
size_t bufSize = strlen(pMeterMetaInfo->name) + tbnameCondLen + strlen(join) + strlen(tagIdBuf);
if (cond != NULL) {
bufSize += strlen(cond->cond);
}
bufSize = (size_t) ((bufSize + redundantLen) * 1.5);
char* tmp = calloc(1, bufSize);
int32_t keyLen = snprintf(tmp, bufSize, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name,
(cond != NULL ? cond->cond : NULL),
(tbnameCondLen > 0 ? pTagCond->tbnameCond.cond : NULL),
pTagCond->relType, join, tagIdBuf, pCmd->groupbyExpr.orderType);
assert(keyLen <= TSDB_MAX_SQL_LEN);
assert(keyLen <= bufSize);
if (keyLen < maxKeySize) {
strcpy(str, tmp);
@ -99,7 +110,7 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str) {
SCond* pDest = &pTagCond->cond[pTagCond->numOfTagCond];
pDest->uid = uid;
pDest->cond = SStringCreate(str);
pDest->cond = strdup(str);
pTagCond->numOfTagCond += 1;
}
@ -1340,14 +1351,20 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) {
void tscTagCondCopy(STagCond* dest, const STagCond* src) {
memset(dest, 0, sizeof(STagCond));
if (src->tbnameCond.cond != NULL) {
dest->tbnameCond.cond = strdup(src->tbnameCond.cond);
}
SStringCopy(&dest->tbnameCond.cond, &src->tbnameCond.cond);
dest->tbnameCond.uid = src->tbnameCond.uid;
memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo));
for (int32_t i = 0; i < src->numOfTagCond; ++i) {
SStringCopy(&dest->cond[i].cond, &src->cond[i].cond);
if (src->cond[i].cond != NULL) {
dest->cond[i].cond = strdup(src->cond[i].cond);
}
dest->cond[i].uid = src->cond[i].uid;
}
@ -1356,10 +1373,9 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
}
void tscTagCondRelease(STagCond* pCond) {
SStringFree(&pCond->tbnameCond.cond);
free(pCond->tbnameCond.cond);
for (int32_t i = 0; i < pCond->numOfTagCond; ++i) {
SStringFree(&pCond->cond[i].cond);
free(pCond->cond[i].cond);
}
memset(pCond, 0, sizeof(STagCond));
@ -1571,123 +1587,6 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->numOfRows = 0;
}
SString SStringCreate(const char* str) {
size_t len = strlen(str);
SString dest = {.n = len, .alloc = len + 1};
dest.z = calloc(1, dest.alloc);
strcpy(dest.z, str);
return dest;
}
void SStringCopy(SString* pDest, const SString* pSrc) {
if (pSrc->n > 0) {
pDest->n = pSrc->n;
pDest->alloc = pDest->n + 1; // one additional space for null terminate
pDest->z = calloc(1, pDest->alloc);
memcpy(pDest->z, pSrc->z, pDest->n);
} else {
memset(pDest, 0, sizeof(SString));
}
}
void SStringFree(SString* pStr) {
if (pStr->alloc > 0) {
tfree(pStr->z);
pStr->alloc = 0;
}
}
void SStringShrink(SString* pStr) {
if (pStr->alloc > (pStr->n + 1) && pStr->alloc > (pStr->n * 2)) {
pStr->z = realloc(pStr->z, pStr->n + 1);
assert(pStr->z != NULL);
pStr->alloc = pStr->n + 1;
}
}
int32_t SStringAlloc(SString* pStr, int32_t size) {
if (pStr->alloc >= size) {
return TSDB_CODE_SUCCESS;
}
size = ALIGN8(size);
char* tmp = NULL;
if (pStr->z != NULL) {
tmp = realloc(pStr->z, size);
memset(pStr->z + pStr->n, 0, size - pStr->n);
} else {
tmp = calloc(1, size);
}
if (tmp == NULL) {
#ifdef WINDOWS
LPVOID lpMsgBuf;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
(LPTSTR)&lpMsgBuf, 0, NULL);
tscTrace("failed to allocate memory, reason:%s", lpMsgBuf);
LocalFree(lpMsgBuf);
#else
char errmsg[256] = {0};
strerror_r(errno, errmsg, tListLen(errmsg));
tscTrace("failed to allocate memory, reason:%s", errmsg);
#endif
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
pStr->z = tmp;
pStr->alloc = size;
return TSDB_CODE_SUCCESS;
}
#define MIN_ALLOC_SIZE 8
int32_t SStringEnsureRemain(SString* pStr, int32_t size) {
if (pStr->alloc - pStr->n > size) {
return TSDB_CODE_SUCCESS;
}
// remain space is insufficient, allocate more spaces
int32_t inc = (size >= MIN_ALLOC_SIZE) ? size : MIN_ALLOC_SIZE;
if (inc < (pStr->alloc >> 1)) {
inc = (pStr->alloc >> 1);
}
// get the new size
int32_t newsize = pStr->alloc + inc;
char* tmp = realloc(pStr->z, newsize);
if (tmp == NULL) {
#ifdef WINDOWS
LPVOID lpMsgBuf;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
(LPTSTR)&lpMsgBuf, 0, NULL);
tscTrace("failed to allocate memory, reason:%s", lpMsgBuf);
LocalFree(lpMsgBuf);
#else
char errmsg[256] = {0};
strerror_r(errno, errmsg, tListLen(errmsg));
tscTrace("failed to allocate memory, reason:%s", errmsg);
#endif
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
memset(tmp + pStr->n, 0, inc);
pStr->alloc = newsize;
pStr->z = tmp;
return TSDB_CODE_SUCCESS;
}
SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param,
SSqlObj* pPrevSql) {
SSqlCmd* pCmd = &pSql->cmd;
@ -1822,9 +1721,7 @@ void tscDoQuery(SSqlObj* pSql) {
}
}
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid) {
STagCond* pTagCond = &pCmd->tagCond;
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->joinInfo.left.uid == uid) {
return pTagCond->joinInfo.left.tagCol;
} else {

View File

@ -128,7 +128,7 @@ extern "C" {
#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered
#define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered
#define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed
#define TSDB_CODE_SERVER_NO_SPACE 110
#define TSDB_CODE_SERV_NO_DISKSPACE 110
#define TSDB_CODE_NOT_SUPER_TABLE 111 // operation only available for super table
#define TSDB_CODE_DUPLICATE_TAGS 112 // tags value for join not unique
#define TSDB_CODE_INVALID_SUBMIT_MSG 113
@ -137,6 +137,8 @@ extern "C" {
#define TSDB_CODE_INVALID_VNODE_STATUS 116
#define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117
#define TSDB_CODE_MAX_ERROR_CODE 118
#ifdef __cplusplus
}
#endif

View File

@ -106,7 +106,6 @@ extern int tsMaxDbs;
extern int tsMaxTables;
extern int tsMaxDnodes;
extern int tsMaxVGroups;
extern int tsShellActivityTimer;
extern char tsMgmtZone[];
extern char tsLocalIp[];
@ -127,6 +126,7 @@ extern int tsEnableHttpModule;
extern int tsEnableMonitorModule;
extern int tsRestRowLimit;
extern int tsCompressMsgSize;
extern int tsMaxSQLStringLen;
extern char tsSocketType[4];

View File

@ -100,6 +100,7 @@ extern "C" {
#define TSDB_COL_NAME_LEN 64
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16
#define TSDB_MAX_TAGS_LEN 512

View File

@ -238,7 +238,7 @@ char *tsError[] = {"success",
"only super table has metric meta info",
"tags value not unique for join",
"invalid submit message",
"not active table(not created yet or deleted already)", //114
"not active table(not created yet or dropped already)", //114
"invalid table id",
"invalid vnode status", //116
"failed to lock resources",

View File

@ -27,7 +27,13 @@ extern "C" {
#define GET_QINFO_ADDR(x) ((char*)(x)-offsetof(SQInfo, query))
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
/*
* set the output buffer page size is 16k
* The page size should be sufficient for at least one output result or intermediate result.
* Some intermediate results may be extremely large, such as top/bottom(100) query.
*/
#define DEFAULT_INTERN_BUF_SIZE 16384L
#define INIT_ALLOCATE_DISK_PAGES 60L
#define DEFAULT_DATA_FILE_MAPPING_PAGES 2L
#define DEFAULT_DATA_FILE_MMAP_WINDOW_SIZE (DEFAULT_DATA_FILE_MAPPING_PAGES * DEFAULT_INTERN_BUF_SIZE)
@ -160,7 +166,7 @@ void pointInterpSupporterDestroy(SPointInterpoSupporter* pPointInterpSupport);
void pointInterpSupporterSetData(SQInfo* pQInfo, SPointInterpoSupporter* pPointInterpSupport);
int64_t loadRequiredBlockIntoMem(SQueryRuntimeEnv* pRuntimeEnv, SPositionInfo* position);
void doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter);
int32_t doCloseAllOpenedResults(SMeterQuerySupportObj* pSupporter);
void disableFunctForSuppleScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order);
void enableFunctForMasterScan(SQueryRuntimeEnv* pRuntimeEnv, int32_t order);
@ -185,7 +191,7 @@ void freeMeterBlockInfoEx(SMeterDataBlockInfoEx* pDataBlockInfoEx, int32_t len);
void setExecutionContext(SMeterQuerySupportObj* pSupporter, SOutputRes* outputRes, int32_t meterIdx, int32_t groupIdx,
SMeterQueryInfo* sqinfo);
void setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo);
int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj* pSupporter, int32_t meterIdx, SMeterQueryInfo* sqinfo);
int64_t getQueryStartPositionInCache(SQueryRuntimeEnv* pRuntimeEnv, int32_t* slot, int32_t* pos, bool ignoreQueryRange);
int64_t getNextAccessedKeyInData(SQuery* pQuery, int64_t* pPrimaryCol, SBlockInfo* pBlockInfo, int32_t blockStatus);
@ -224,11 +230,11 @@ void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY
/**
* add the new allocated disk page to meter query info
* the new allocated disk page is used to keep the intermediate (interval) results
*
* @param pQuery
* @param pMeterQueryInfo
* @param pSupporter
*/
tFilePage* addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter);
tFilePage* addDataPageForMeterQueryInfo(SQuery* pQuery, SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter);
/**
* save the query range data into SMeterQueryInfo

View File

@ -56,7 +56,7 @@ static int32_t tabObjVGIDComparator(const void* pLeft, const void* pRight) {
// monotonic inc in memory address
static int32_t tabObjPointerComparator(const void* pLeft, const void* pRight) {
int64_t ret = (int64_t)pLeft - (int64_t)pRight;
int64_t ret = (*(STabObj**)(pLeft))->uid - (*(STabObj**)(pRight))->uid;
if (ret == 0) {
return 0;
} else {
@ -427,11 +427,11 @@ static tQueryResultset* doNestedLoopIntersect(tQueryResultset* pRes1, tQueryResu
}
static tQueryResultset* doSortIntersect(tQueryResultset* pRes1, tQueryResultset* pRes2) {
size_t sizePtr = sizeof(void*);
size_t sizePtr = sizeof(void *);
qsort(pRes1->pRes, pRes1->num, sizePtr, tabObjPointerComparator);
qsort(pRes2->pRes, pRes2->num, sizePtr, tabObjPointerComparator);
int32_t i = 0;
int32_t j = 0;

View File

@ -67,13 +67,13 @@ static void doApplyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMete
SBlockInfo *pBlockInfo, int64_t *pPrimaryCol, char *sdata, SField *pFields,
__block_search_fn_t searchFn);
static void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult);
static int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult);
static void applyIntervalQueryOnBlock(SMeterQuerySupportObj *pSupporter, SMeterDataInfo *pInfoEx, char *data,
int64_t *pPrimaryData, SBlockInfo *pBlockInfo, int32_t blockStatus,
SField *pFields, __block_search_fn_t searchFn);
static void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx);
static void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery,
static int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery,
const SQueryRuntimeEnv *pRuntimeEnv);
static void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t numOfIncrementRes);
static void getBasicCacheInfoSnapshot(SQuery *pQuery, SCacheInfo *pCacheInfo, int32_t vid);
@ -413,7 +413,7 @@ char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t vnodeId, int
vnodeSetOpenedFileNames(pVnodeFileInfo);
if (doOpenQueryFileData(pQInfo, pVnodeFileInfo, vnodeId) != TSDB_CODE_SUCCESS) {
doCloseOpenedFileData(pVnodeFileInfo); // there may be partially open fd, close it anyway.
doCloseOpenedFileData(pVnodeFileInfo); // all the fds may be partially opened, close them anyway.
return pVnodeFileInfo->pHeaderFileData;
}
}
@ -1291,9 +1291,6 @@ static int32_t blockwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, int32_t
for (int32_t k = 0; k < pQuery->numOfOutputCols; ++k) {
int32_t functionId = pQuery->pSelectExpr[k].pBase.functionId;
// if (!functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
// continue;
// }
SField dummyField = {0};
@ -3052,7 +3049,7 @@ static void vnodeRecordAllFiles(SQInfo *pQInfo, int32_t vnodeId) {
sprintf(pVnodeFilesInfo->dbFilePathPrefix, "%s/vnode%d/db/", tsDirectory, vnodeId);
DIR *pDir = opendir(pVnodeFilesInfo->dbFilePathPrefix);
if (pDir == NULL) {
dError("QInfo:%p failed to open directory:%s", pQInfo, pVnodeFilesInfo->dbFilePathPrefix);
dError("QInfo:%p failed to open directory:%s, %s", pQInfo, pVnodeFilesInfo->dbFilePathPrefix, strerror(errno));
return;
}
@ -3920,11 +3917,16 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param)
dError("QInfo:%p failed to create file: %s on disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
// set 4k page for each meter
pSupporter->numOfPages = pSupporter->numOfMeters;
ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE);
ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE);
if (ret != TSDB_CODE_SUCCESS) {
dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile,
strerror(errno));
return TSDB_CODE_SERV_NO_DISKSPACE;
}
pSupporter->runtimeEnv.numOfRowsPerPage = (DEFAULT_INTERN_BUF_SIZE - sizeof(tFilePage)) / pQuery->rowSize;
pSupporter->lastPageId = -1;
pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE;
@ -3932,7 +3934,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param)
pSupporter->meterOutputMMapBuf =
mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0);
if (pSupporter->meterOutputMMapBuf == MAP_FAILED) {
dError("QInfo:%p failed to map data file: %s to disk. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
return TSDB_CODE_SERV_OUT_OF_MEMORY;
}
}
@ -4733,20 +4735,24 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) {
SQuery * pQuery = pRuntimeEnv->pQuery;
int64_t st = taosGetTimestampMs();
int32_t ret = TSDB_CODE_SUCCESS;
while (pSupporter->subgroupIdx < pSupporter->pSidSet->numOfSubSet) {
int32_t start = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx];
int32_t end = pSupporter->pSidSet->starterPos[pSupporter->subgroupIdx + 1];
int32_t ret =
doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end);
ret = doMergeMetersResultsToGroupRes(pSupporter, pQuery, pRuntimeEnv, pSupporter->pMeterDataInfo, start, end);
if (ret < 0) { // not enough disk space to save the data into disk
return -1;
}
pSupporter->subgroupIdx += 1;
/* this group generates at least one result, return results */
// this group generates at least one result, return results
if (ret > 0) {
break;
}
assert(pSupporter->numOfGroupResultPages == 0);
dTrace("QInfo:%p no result in group %d, continue", GET_QINFO_ADDR(pQuery), pSupporter->subgroupIdx - 1);
}
@ -4754,7 +4760,7 @@ int32_t mergeMetersResultToOneGroups(SMeterQuerySupportObj *pSupporter) {
dTrace("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%lldms", GET_QINFO_ADDR(pQuery),
pSupporter->subgroupIdx - 1, pSupporter->pSidSet->numOfSubSet, taosGetTimestampMs() - st);
return pSupporter->numOfGroupResultPages;
return TSDB_CODE_SUCCESS;
}
void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery) {
@ -4762,7 +4768,9 @@ void copyResToQueryResultBuf(SMeterQuerySupportObj *pSupporter, SQuery *pQuery)
pSupporter->numOfGroupResultPages = 0;
// current results of group has been sent to client, try next group
mergeMetersResultToOneGroups(pSupporter);
if (mergeMetersResultToOneGroups(pSupporter) != TSDB_CODE_SUCCESS) {
return; // failed to save data in the disk
}
// set current query completed
if (pSupporter->numOfGroupResultPages == 0 && pSupporter->subgroupIdx == pSupporter->pSidSet->numOfSubSet) {
@ -4840,7 +4848,10 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery
} else {
// copy data to disk buffer
if (buffer[0]->numOfElems == pQuery->pointsToRead) {
flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv);
if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) {
return -1;
}
resetMergeResultBuf(pQuery, pCtx);
}
@ -4887,7 +4898,14 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery
}
if (buffer[0]->numOfElems != 0) { // there are data in buffer
flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv);
if (flushFromResultBuf(pSupporter, pQuery, pRuntimeEnv) != TSDB_CODE_SUCCESS) {
dError("QInfo:%p failed to flush data into temp file, abort query", GET_QINFO_ADDR(pQuery), pSupporter->extBufFile);
tfree(pTree);
tfree(pValidMeter);
tfree(posArray);
return -1;
}
}
int64_t endt = taosGetTimestampMs();
@ -4906,25 +4924,44 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery
return pSupporter->numOfGroupResultPages;
}
static void extendDiskBuf(SMeterQuerySupportObj *pSupporter, int32_t numOfPages) {
static int32_t extendDiskBuf(const SQuery* pQuery, SMeterQuerySupportObj *pSupporter, int32_t numOfPages) {
assert(pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE == pSupporter->bufSize);
SQInfo* pQInfo = (SQInfo*) GET_QINFO_ADDR(pQuery);
int32_t ret = munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize);
pSupporter->numOfPages = numOfPages;
// disk-based output buffer is exhausted, try to extend the disk-based buffer
/*
* disk-based output buffer is exhausted, try to extend the disk-based buffer, the available disk space may
* be insufficient
*/
ret = ftruncate(pSupporter->meterOutputFd, pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE);
if (ret != 0) {
perror("error in allocate the disk-based buffer");
return;
dError("QInfo:%p failed to create intermediate result output file:%s. %s", pQInfo, pSupporter->extBufFile,
strerror(errno));
pQInfo->code = -TSDB_CODE_SERV_NO_DISKSPACE;
pQInfo->killed = 1;
return pQInfo->code;
}
pSupporter->bufSize = pSupporter->numOfPages * DEFAULT_INTERN_BUF_SIZE;
pSupporter->meterOutputMMapBuf =
mmap(NULL, pSupporter->bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pSupporter->meterOutputFd, 0);
if (pSupporter->meterOutputMMapBuf == MAP_FAILED) {
dError("QInfo:%p failed to map temp file: %s. %s", pQInfo, pSupporter->extBufFile, strerror(errno));
pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY;
pQInfo->killed = 1;
return pQInfo->code;
}
return TSDB_CODE_SUCCESS;
}
void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv) {
int32_t flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery, const SQueryRuntimeEnv *pRuntimeEnv) {
int32_t numOfMeterResultBufPages = pSupporter->lastPageId + 1;
int64_t dstSize = numOfMeterResultBufPages * DEFAULT_INTERN_BUF_SIZE +
pSupporter->groupResultSize * (pSupporter->numOfGroupResultPages + 1);
@ -4935,7 +4972,9 @@ void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery,
requiredPages += pSupporter->numOfMeters;
}
extendDiskBuf(pSupporter, requiredPages);
if (extendDiskBuf(pQuery, pSupporter, requiredPages) != TSDB_CODE_SUCCESS) {
return -1;
}
}
char *lastPosition = pSupporter->meterOutputMMapBuf + DEFAULT_INTERN_BUF_SIZE * numOfMeterResultBufPages +
@ -4949,6 +4988,7 @@ void flushFromResultBuf(SMeterQuerySupportObj *pSupporter, const SQuery *pQuery,
}
pSupporter->numOfGroupResultPages += 1;
return TSDB_CODE_SUCCESS;
}
void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx) {
@ -4966,7 +5006,7 @@ void setMeterDataInfo(SMeterDataInfo *pMeterDataInfo, SMeterObj *pMeterObj, int3
pMeterDataInfo->meterOrderIdx = meterIdx;
}
void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) {
int32_t doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) {
SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
@ -4980,11 +5020,20 @@ void doCloseAllOpenedResults(SMeterQuerySupportObj *pSupporter) {
pRuntimeEnv->pMeterObj = getMeterObj(pSupporter->pMeterObj, pSupporter->pSidSet->pSids[index]->sid);
assert(pRuntimeEnv->pMeterObj == pMeterInfo[i].pMeterObj);
setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo);
saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows);
int32_t ret = setIntervalQueryExecutionContext(pSupporter, i, pMeterInfo[i].pMeterQInfo);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
ret = saveResult(pSupporter, pMeterInfo[i].pMeterQInfo, pMeterInfo[i].pMeterQInfo->lastResRows);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
}
}
}
return TSDB_CODE_SUCCESS;
}
void disableFunctForSuppleScan(SQueryRuntimeEnv *pRuntimeEnv, int32_t order) {
@ -5690,18 +5739,24 @@ void changeMeterQueryInfoForSuppleQuery(SMeterQueryInfo *pMeterQueryInfo, TSKEY
}
}
static tFilePage *allocNewPage(SMeterQuerySupportObj *pSupporter, uint32_t *pageId) {
static tFilePage *allocNewPage(SQuery* pQuery, SMeterQuerySupportObj *pSupporter, uint32_t *pageId) {
if (pSupporter->lastPageId == pSupporter->numOfPages - 1) {
extendDiskBuf(pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters);
if (extendDiskBuf(pQuery, pSupporter, pSupporter->numOfPages + pSupporter->numOfMeters) != TSDB_CODE_SUCCESS) {
return NULL;
}
}
*pageId = (++pSupporter->lastPageId);
return getFilePage(pSupporter, *pageId);
}
tFilePage *addDataPageForMeterQueryInfo(SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter) {
tFilePage *addDataPageForMeterQueryInfo(SQuery* pQuery, SMeterQueryInfo *pMeterQueryInfo, SMeterQuerySupportObj *pSupporter) {
uint32_t pageId = 0;
tFilePage *pPage = allocNewPage(pSupporter, &pageId);
tFilePage *pPage = allocNewPage(pQuery, pSupporter, &pageId);
if (pPage == NULL) { // failed to allocate disk-based buffer for intermediate results
return NULL;
}
if (pMeterQueryInfo->numOfPages >= pMeterQueryInfo->numOfAlloc) {
pMeterQueryInfo->numOfAlloc = pMeterQueryInfo->numOfAlloc << 1;
@ -6199,46 +6254,53 @@ void validateTimestampForSupplementResult(SQueryRuntimeEnv *pRuntimeEnv, int64_t
}
}
void setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) {
int32_t setOutputBufferForIntervalQuery(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
tFilePage * pData = NULL;
SQuery* pQuery = pRuntimeEnv->pQuery;
// in the first scan, new space needed for results
if (pMeterQueryInfo->numOfPages == 0) {
pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter);
pData = addDataPageForMeterQueryInfo(pQuery, pMeterQueryInfo, pSupporter);
} else {
int32_t lastPageId = pMeterQueryInfo->pageList[pMeterQueryInfo->numOfPages - 1];
pData = getFilePage(pSupporter, lastPageId);
if (pData->numOfElems >= pRuntimeEnv->numOfRowsPerPage) {
pData = addDataPageForMeterQueryInfo(pMeterQueryInfo, pSupporter);
assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer
pData = addDataPageForMeterQueryInfo(pRuntimeEnv->pQuery, pMeterQueryInfo, pSupporter);
if (pData != NULL) {
assert(pData->numOfElems == 0); // number of elements must be 0 for new allocated buffer
}
}
}
if (pData == NULL) {
return -1;
}
for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutputCols; ++i) {
pRuntimeEnv->pCtx[i].aOutputBuf = getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, i);
pRuntimeEnv->pCtx[i].resultInfo = &pMeterQueryInfo->resultInfo[i];
}
return TSDB_CODE_SUCCESS;
}
void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx,
int32_t setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t meterIdx,
SMeterQueryInfo *pMeterQueryInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
if (IS_MASTER_SCAN(pRuntimeEnv)) {
setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo);
if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) {
// not enough disk space or memory buffer for intermediate results
return -1;
}
if (pMeterQueryInfo->lastResRows == 0) {
initCtxOutputBuf(pRuntimeEnv);
}
// reset the number of iterated elements, once this function is called. since the pCtx for different
for (int32_t j = 0; j < pQuery->numOfOutputCols; ++j) {
// pRuntimeEnv->pCtx[j].numOfIteratedElems = 0;
}
} else {
if (pMeterQueryInfo->reverseFillRes) {
setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo);
@ -6249,7 +6311,9 @@ void setIntervalQueryExecutionContext(SMeterQuerySupportObj *pSupporter, int32_t
*
* If the master scan does not produce any results, new space needs to be allocated during the supplement scan
*/
setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo);
if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) {
return -1;
}
}
}
@ -6659,14 +6723,14 @@ static void validateResultBuf(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo
}
}
void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) {
int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryInfo, int32_t numOfResult) {
SQueryRuntimeEnv *pRuntimeEnv = &pSupporter->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
// no results generated, do nothing for master scan
if (numOfResult <= 0) {
if (IS_MASTER_SCAN(pRuntimeEnv)) {
return;
return TSDB_CODE_SUCCESS;
} else {
/*
* There is a case where no result is generated during the supplement scan, and during the main
@ -6691,7 +6755,7 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI
setCtxOutputPointerForSupplementScan(pSupporter, pMeterQueryInfo);
}
return;
return TSDB_CODE_SUCCESS;
}
}
@ -6720,7 +6784,9 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI
pMeterQueryInfo->numOfRes += numOfResult;
assert(pData->numOfElems <= pRuntimeEnv->numOfRowsPerPage);
setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo);
if (setOutputBufferForIntervalQuery(pSupporter, pMeterQueryInfo) != TSDB_CODE_SUCCESS) {
return -1;
}
for (int32_t i = 0; i < pQuery->numOfOutputCols; ++i) {
resetResultInfo(&pMeterQueryInfo->resultInfo[i]);
@ -6743,6 +6809,8 @@ void saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQueryI
tColModelDisplay(cm, outputPage->data, outputPage->numOfElems, pRuntimeEnv->numOfRowsPerPage);
#endif
}
return TSDB_CODE_SUCCESS;
}
static int32_t getSubsetNumber(SMeterQuerySupportObj *pSupporter) {

View File

@ -157,7 +157,11 @@ static SMeterDataInfo *queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMe
setExecutionContext(pSupporter, pSupporter->pResult, k, pMeterInfo[k].groupIdx, pMeterQueryInfo);
} else {
setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo);
int32_t ret = setIntervalQueryExecutionContext(pSupporter, k, pMeterQueryInfo);
if (ret != TSDB_CODE_SUCCESS) {
pQInfo->killed = 1;
return NULL;
}
}
qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode,
@ -306,7 +310,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
if (pReqMeterDataInfo == NULL) {
dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo);
pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY;
pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY;
pQInfo->killed = 1;
return NULL;
}
@ -338,7 +342,7 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
dError("QInfo:%p failed to allocate memory to perform query processing, abort", pQInfo);
tfree(pReqMeterDataInfo);
pQInfo->code = TSDB_CODE_SERV_OUT_OF_MEMORY;
pQInfo->code = -TSDB_CODE_SERV_OUT_OF_MEMORY;
pQInfo->killed = 1;
return NULL;
}
@ -393,7 +397,12 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
setExecutionContext(pSupporter, pSupporter->pResult, pOneMeterDataInfo->meterOrderIdx,
pOneMeterDataInfo->groupIdx, pMeterQueryInfo);
} else { // interval query
setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo);
int32_t ret = setIntervalQueryExecutionContext(pSupporter, pOneMeterDataInfo->meterOrderIdx, pMeterQueryInfo);
if (ret != TSDB_CODE_SUCCESS) {
tfree(pReqMeterDataInfo); // error code has been set
pQInfo->killed = 1;
return NULL;
}
}
SCompBlock *pBlock = pInfoEx->pBlock.compBlock;
@ -900,7 +909,12 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) {
dTrace("QInfo:%p main scan completed, elapsed time: %lldms, supplementary scan start, order:%d", pQInfo, et - st,
pQuery->order.order ^ 1);
doCloseAllOpenedResults(pSupporter);
// failed to save all intermediate results to disk, abort further query processing
if (doCloseAllOpenedResults(pSupporter) != TSDB_CODE_SUCCESS) {
dError("QInfo:%p failed to save intermediate results, abort further query processing", pQInfo);
return;
}
doMultiMeterSupplementaryScan(pQInfo);
if (isQueryKilled(pQuery)) {
@ -911,12 +925,13 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) {
if (pQuery->nAggTimeInterval > 0) {
assert(pSupporter->subgroupIdx == 0 && pSupporter->numOfGroupResultPages == 0);
mergeMetersResultToOneGroups(pSupporter);
copyResToQueryResultBuf(pSupporter, pQuery);
if (mergeMetersResultToOneGroups(pSupporter) == TSDB_CODE_SUCCESS) {
copyResToQueryResultBuf(pSupporter, pQuery);
#ifdef _DEBUG_VIEW
displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len);
displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len);
#endif
}
} else { // not an interval query
copyFromGroupBuf(pQInfo, pSupporter->pResult);
}

View File

@ -824,11 +824,11 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t *
}
if (pQInfo->killed) {
dTrace("QInfo:%p it is already killed, %p, code:%d", pQInfo, pQuery, pQInfo->code);
dTrace("QInfo:%p query is killed, %p, code:%d", pQInfo, pQuery, pQInfo->code);
if (pQInfo->code == TSDB_CODE_SUCCESS) {
return TSDB_CODE_QUERY_CANCELLED;
} else { // in case of not TSDB_CODE_SUCCESS, return the code to client
return pQInfo->code;
return abs(pQInfo->code);
}
}
@ -837,8 +837,13 @@ int vnodeRetrieveQueryInfo(void *handle, int *numOfRows, int *rowSize, int16_t *
*rowSize = pQuery->rowSize;
*timePrec = vnodeList[pQInfo->pObj->vnode].cfg.precision;
if (pQInfo->code < 0) return -pQInfo->code;
dTrace("QInfo:%p, retrieve data info completed, precision:%d, rowsize:%d, rows:%d, code:%d", pQInfo, *timePrec,
*rowSize, *numOfRows, pQInfo->code);
if (pQInfo->code < 0) { // a negative value means an error occurred
return -pQInfo->code;
}
return TSDB_CODE_SUCCESS;
}
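The retrieve path above relies on a sign convention: while a query is still being processed, pQInfo->code stores internal failures as negative values, and the positive TSDB error code is restored (via abs() or negation) only when it is handed back to the client. A hedged, self-contained sketch of that convention, with illustrative struct and code values rather than the real TDengine definitions:

```c
// Sketch of the error-code sign convention; the struct and the numeric value
// of TSDB_CODE_SERV_OUT_OF_MEMORY are illustrative, not taken from TDengine headers.
#include <stdlib.h>

#define TSDB_CODE_SUCCESS            0
#define TSDB_CODE_SERV_OUT_OF_MEMORY 12   // illustrative positive error code

typedef struct { int code; } QInfo;

static void markFailed(QInfo *q) {
  q->code = -TSDB_CODE_SERV_OUT_OF_MEMORY;   // stored negative while handling continues
}

static int codeForClient(const QInfo *q) {
  if (q->code < 0) {
    return abs(q->code);                     // hand the positive code back to the client
  }
  return TSDB_CODE_SUCCESS;
}
```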

View File

@ -529,9 +529,11 @@ static int vnodeDoSubmitJob(SVnodeObj *pVnode, int import, int32_t *ssid, int32_
int code = TSDB_CODE_SUCCESS;
int32_t numOfPoints = 0;
int32_t i = 0;
SShellSubmitBlock tBlock;
for (i = *ssid; i < esid; i++) {
numOfPoints = 0;
tBlock = *pBlocks;
code = vnodeCheckSubmitBlockContext(pBlocks, pVnode);
if (code != TSDB_CODE_SUCCESS) break;
@ -565,6 +567,13 @@ static int vnodeDoSubmitJob(SVnodeObj *pVnode, int import, int32_t *ssid, int32_
*ssid = i;
*ppBlocks = pBlocks;
/* The block content pointed to by pBlocks can be modified by the vnodeForwardToPeer
* interface and may need to be processed again afterwards. In that case, copy the
* original block content back.
*/
if (import && (code == TSDB_CODE_ACTION_IN_PROGRESS)) {
memcpy((void *)pBlocks, (void *)&tBlock, sizeof(SShellSubmitBlock));
}
return code;
}
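The snapshot of *pBlocks taken at the top of the loop exists only so the block can be restored when forwarding leaves the import in TSDB_CODE_ACTION_IN_PROGRESS and the same block has to be processed again. The backup/restore pattern, reduced to its essentials with placeholder types and calls (SubmitBlock and forwardToPeer stand in for SShellSubmitBlock and vnodeForwardToPeer):

```c
// Backup/restore sketch for a buffer that a forwarding call may rewrite in place.
// Types, fields, and the enum values are placeholders for the TDengine originals.
#include <string.h>

typedef struct { long uid; int numOfRows; char payload[64]; } SubmitBlock;

enum { SUCCESS = 0, ACTION_IN_PROGRESS = 1 };

static int forwardToPeer(SubmitBlock *blk) { (void)blk; return ACTION_IN_PROGRESS; }

static int submitOneBlock(SubmitBlock *pBlock, int import) {
  SubmitBlock saved = *pBlock;          // keep an untouched copy before forwarding

  int code = forwardToPeer(pBlock);     // may modify *pBlock in place

  if (import && code == ACTION_IN_PROGRESS) {
    memcpy(pBlock, &saved, sizeof(SubmitBlock));  // block will be used again: restore it
  }
  return code;
}
```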
@ -606,7 +615,7 @@ int vnodeProcessShellSubmitRequest(char *pMsg, int msgLen, SShellObj *pObj) {
if (tsAvailDataDirGB < tsMinimalDataDirGB) {
dError("server disk space remain %.3f GB, need at least %.3f GB, stop writing", tsAvailDataDirGB, tsMinimalDataDirGB);
code = TSDB_CODE_SERVER_NO_SPACE;
code = TSDB_CODE_SERV_NO_DISKSPACE;
goto _submit_over;
}

View File

@ -26,7 +26,7 @@
#include "tcrc32c.h"
//todo : use the original source code
#pragma GCC diagnostic ignored "-Wunused-function"
//#pragma GCC diagnostic ignored "-Wunused-function"
#define POLY 0x82f63b78
#define LONG_SHIFT 8192
@ -1093,6 +1093,7 @@ static uint32_t short_shifts[4][256] = {
0xe1a734e7, 0xc41cc13c, 0x140cd014, 0x31b725cf, 0x5f7b3ba2, 0x7ac0ce79,
0x82e30778, 0xa758f2a3, 0xc994ecce, 0xec2f1915}};
#if 0
static uint32_t append_trivial(uint32_t crc, crc_stream input, size_t length) {
for (size_t i = 0; i < length; ++i) {
crc = crc ^ input[i];
@ -1130,6 +1131,7 @@ static uint32_t append_adler_table(uint32_t crci, crc_stream input,
}
return (uint32_t)(crc ^ 0xffffffff);
}
#endif
/* Table-driven software version as a fall-back. This is about 15 times slower
than using the hardware instructions. This assumes little-endian integers,

View File

@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <inttypes.h>
#include "os.h"
#include "taos.h"
#include "taosmsg.h"
@ -23,7 +23,7 @@
#include "ttypes.h"
#include "tutil.h"
#pragma GCC diagnostic ignored "-Wformat"
//#pragma GCC diagnostic ignored "-Wformat"
#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \
(data + (schema)->colOffset[colId] * (allrow) + (rowId) * (schema)->pFields[colId].bytes)
@ -1017,7 +1017,7 @@ static void UNUSED_FUNC tSortDataPrint(int32_t type, char *prefix, char *startx,
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
printf("%s:(%lld, %lld, %lld)\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx);
printf("%s:(%" PRId64 ", %" PRId64 ", %" PRId64 ")\n", prefix, *(int64_t *)startx, *(int64_t *)midx, *(int64_t *)endx);
break;
case TSDB_DATA_TYPE_FLOAT:
printf("%s:(%f, %f, %f)\n", prefix, *(float *)startx, *(float *)midx, *(float *)endx);
@ -1093,7 +1093,7 @@ static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
printf("%lld\t", *(int64_t *)startx);
printf("%" PRId64 "\t", *(int64_t *)startx);
break;
case TSDB_DATA_TYPE_BINARY:
printf("%s\t", startx);
@ -1264,7 +1264,7 @@ static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx,
assert(pPage->numOfElems > 0);
tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems);
printf("id: %d count: %d\n", j, buffer->numOfElems);
printf("id: %d count: %" PRIu64 "\n", j, buffer->numOfElems);
}
}
tfree(pPage);
@ -1376,10 +1376,16 @@ static void printBinaryData(char *data, int32_t len) {
}
if (len == 50) { // probably the avg intermediate result
printf("%lf,%d\t", *(double *)data, *(int64_t *)(data + sizeof(double)));
printf("%lf,%" PRId64 "\t", *(double *)data, *(int64_t *)(data + sizeof(double)));
} else if (data[8] == ',') { // in TSDB_FUNC_FIRST_DST/TSDB_FUNC_LAST_DST,
// the value is separated by ','
printf("%ld,%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1);
//printf("%" PRId64 ",%0x\t", *(int64_t *)data, data + sizeof(int64_t) + 1);
printf("%" PRId64 ", HEX: ", *(int64_t *)data);
int32_t tmp_len = len - sizeof(int64_t) - 1;
for (int32_t i = 0; i < tmp_len; ++i) {
printf("%0x ", *(data + sizeof(int64_t) + 1 + i));
}
printf("\t");
} else if (isCharString) {
printf("%s\t", data);
}
@ -1389,26 +1395,26 @@ static void printBinaryData(char *data, int32_t len) {
static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) {
if (param->functionId == TSDB_FUNC_LAST_DST) {
switch (param->type) {
case TSDB_DATA_TYPE_TINYINT:printf("%lld,%d\t", *(int64_t *) data, *(int8_t *) (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_TINYINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int8_t *) (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_SMALLINT:printf("%lld,%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_SMALLINT:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int16_t *) (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:printf("%lld,%lld\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_BIGINT:printf("%" PRId64 ",%" PRId64 "\t", *(int64_t *) data, *(int64_t *) (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_FLOAT:printf("%lld,%d\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_FLOAT:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(float *) (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_DOUBLE:printf("%lld,%d\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_DOUBLE:printf("%" PRId64 ",%f\t", *(int64_t *) data, *(double *) (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_BINARY:printf("%lld,%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1));
case TSDB_DATA_TYPE_BINARY:printf("%" PRId64 ",%s\t", *(int64_t *) data, (data + TSDB_KEYSIZE + 1));
break;
case TSDB_DATA_TYPE_INT:
default:printf("%lld,%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1));
default:printf("%" PRId64 ",%d\t", *(int64_t *) data, *(int32_t *) (data + TSDB_KEYSIZE + 1));
break;
}
} else if (param->functionId == TSDB_FUNC_AVG) {
printf("%f,%lld\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1));
printf("%f,%" PRId64 "\t", *(double *) data, *(int64_t *) (data + sizeof(double) + 1));
} else {
// functionId == TSDB_FUNC_MAX_DST | TSDB_FUNC_TAG
switch (param->type) {
@ -1420,13 +1426,13 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) {
break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
printf("%lld\t", *(int64_t *)data);
printf("%" PRId64 "\t", *(int64_t *)data);
break;
case TSDB_DATA_TYPE_FLOAT:
printf("%d\t", *(float *)data);
printf("%f\t", *(float *)data);
break;
case TSDB_DATA_TYPE_DOUBLE:
printf("%d\t", *(double *)data);
printf("%f\t", *(double *)data);
break;
case TSDB_DATA_TYPE_BINARY:
printf("%s\t", data);
@ -1434,7 +1440,7 @@ static void printBinaryDataEx(char *data, int32_t len, SSrcColumnInfo *param) {
case TSDB_DATA_TYPE_INT:
default:
printf("%d\t", *(double *)data);
printf("%f\t", *(double *)data);
break;
}
}
@ -1450,7 +1456,7 @@ void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t
switch (type) {
case TSDB_DATA_TYPE_BIGINT:
printf("%lld\t", *(int64_t *)val);
printf("%" PRId64 "\t", *(int64_t *)val);
break;
case TSDB_DATA_TYPE_INT:
printf("%d\t", *(int32_t *)val);
@ -1468,7 +1474,7 @@ void tColModelDisplay(tColModel *pModel, void *pData, int32_t numOfRows, int32_t
printf("%lf\t", *(double *)val);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
printf("%lld\t", *(int64_t *)val);
printf("%" PRId64 "\t", *(int64_t *)val);
break;
case TSDB_DATA_TYPE_TINYINT:
printf("%d\t", *(int8_t *)val);
@ -1501,7 +1507,7 @@ void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32
switch (pModel->pFields[j].type) {
case TSDB_DATA_TYPE_BIGINT:
printf("%lld\t", *(int64_t *)val);
printf("%" PRId64 "\t", *(int64_t *)val);
break;
case TSDB_DATA_TYPE_INT:
printf("%d\t", *(int32_t *)val);
@ -1519,7 +1525,7 @@ void tColModelDisplayEx(tColModel *pModel, void *pData, int32_t numOfRows, int32
printf("%lf\t", *(double *)val);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
printf("%lld\t", *(int64_t *)val);
printf("%" PRId64 "\t", *(int64_t *)val);
break;
case TSDB_DATA_TYPE_TINYINT:
printf("%d\t", *(int8_t *)val);

View File

@ -124,6 +124,7 @@ int tsMgmtEqualVnodeNum = 0;
int tsEnableHttpModule = 1;
int tsEnableMonitorModule = 1;
int tsRestRowLimit = 10240;
int tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -653,7 +654,11 @@ static void doInitGlobalConfig() {
tsInitConfigOption(cfg++, "compressMsgSize", &tsCompressMsgSize, TSDB_CFG_VTYPE_INT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW,
-1, 10000000, 0, TSDB_CFG_UTYPE_NONE);
tsInitConfigOption(cfg++, "maxSQLLength", &tsMaxSQLStringLen, TSDB_CFG_VTYPE_INT,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW,
TSDB_MAX_SQL_LEN, TSDB_MAX_ALLOWED_SQL_LEN, 0, TSDB_CFG_UTYPE_BYTE);
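With this option registered, the maximum SQL statement length becomes configurable at startup instead of being fixed at TSDB_MAX_SQL_LEN; the accepted range is bounded by TSDB_MAX_SQL_LEN and TSDB_MAX_ALLOWED_SQL_LEN, and TSDB_CFG_UTYPE_BYTE suggests the value is interpreted in bytes. A hypothetical taos.cfg entry raising the limit might look like the following; the concrete value is illustrative only:

```
# taos.cfg (illustrative) -- per-statement SQL length limit, in bytes
maxSQLLength 1048576
```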
// locale & charset
tsInitConfigOption(cfg++, "timezone", tsTimezone, TSDB_CFG_VTYPE_STRING,
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT,

View File

@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <inttypes.h>
#include "os.h"
#include "taosmsg.h"
@ -447,7 +447,7 @@ void tHistogramPrint(SHistogramInfo* pHisto) {
printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems);
#if defined(USE_ARRAYLIST)
for (int32_t i = 0; i < pHisto->numOfEntries; ++i) {
printf("%d: (%f, %lld)\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num);
printf("%d: (%f, %" PRId64 ")\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num);
}
#else
tSkipListNode* pNode = pHisto->pList->pHead.pForward[0];

View File

@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <inttypes.h>
#include <float.h>
#include <math.h>
#include <stdbool.h>
@ -570,7 +570,7 @@ int32_t tSkipListIterateList(tSkipList *pSkipList, tSkipListNode ***pRes, bool (
char* tmp = realloc((*pRes), num * POINTER_BYTES);
assert(tmp != NULL);
*pRes = tmp;
*pRes = (tSkipListNode**)tmp;
}
return num;
@ -688,7 +688,7 @@ void tSkipListPrint(tSkipList *pSkipList, int16_t nlevel) {
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_BIGINT:
fprintf(stdout, "%d: %lld \n", id++, p->key.i64Key);
fprintf(stdout, "%d: %" PRId64 " \n", id++, p->key.i64Key);
break;
case TSDB_DATA_TYPE_BINARY:
fprintf(stdout, "%d: %s \n", id++, p->key.pz);

View File

@ -12,11 +12,9 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <inttypes.h>
#include "os.h"
#include "tstrbuild.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
void taosStringBuilderEnsureCapacity(SStringBuilder* sb, size_t size) {
size += sb->pos;
@ -72,7 +70,7 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt
void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
char buf[64];
size_t len = sprintf(buf, "%lld", v);
size_t len = sprintf(buf, "%" PRId64, v);
taosStringBuilderAppendStringLen(sb, buf, len);
}

View File

@ -12,7 +12,7 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <inttypes.h>
#include "os.h"
#include "taos.h"
#include "tsdb.h"
@ -213,7 +213,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
return sprintf(dst, "%d", (int32_t)pVar->i64Key);
case TSDB_DATA_TYPE_BIGINT:
return sprintf(dst, "%lld", pVar->i64Key);
return sprintf(dst, "%" PRId64, pVar->i64Key);
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
@ -224,6 +224,7 @@ int32_t tVariantToString(tVariant *pVar, char *dst) {
}
}
#if 0
static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type, bool releaseVariantPtr) {
if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
setNull(pDest, type, tDataTypeDesc[type].nSize);
@ -337,7 +338,7 @@ static int32_t doConvertToInteger(tVariant *pVariant, char *pDest, int32_t type,
return 0;
}
#endif
static FORCE_INLINE int32_t convertToBoolImpl(char *pStr, int32_t len) {
if ((strncasecmp(pStr, "true", len) == 0) && (len == 4)) {
return TSDB_TRUE;
@ -386,7 +387,7 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
} else {
if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%lld", pVariant->i64Key);
sprintf(pBuf == NULL ? *pDest : pBuf, "%" PRId64, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
sprintf(pBuf == NULL ? *pDest : pBuf, "%lf", pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_BOOL) {
@ -411,7 +412,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
int32_t nLen = 0;
if (pVariant->nType >= TSDB_DATA_TYPE_TINYINT && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
nLen = sprintf(pDst, "%lld", pVariant->i64Key);
nLen = sprintf(pDst, "%" PRId64, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
nLen = sprintf(pDst, "%lf", pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_BINARY) {
@ -437,7 +438,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) {
char* tmp = realloc(pVariant->wpz, (*pDestSize + 1)*TSDB_NCHAR_SIZE);
assert(tmp != NULL);
pVariant->wpz = tmp;
pVariant->wpz = (wchar_t *)tmp;
} else {
taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE);
}