Merge pull request #4350 from taosdata/release/s106

Release/s106
huili 2020-11-26 16:30:09 +08:00 committed by GitHub
commit ee8755e509
520 changed files with 39516 additions and 17482 deletions

.gitmodules (6 changed lines)

@ -4,3 +4,9 @@
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git

Jenkinsfile (128 changed lines)

@ -27,6 +27,7 @@ pipeline {
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
#./test-all.sh smoke
./test-all.sh pytest
@ -79,7 +80,20 @@ pipeline {
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2
@ -124,14 +138,33 @@ pipeline {
sh'''
cd ${WORKSPACE}
git checkout develop
cd tests/gotest
bash batchtest.sh
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
}
}
@ -139,5 +172,82 @@ pipeline {
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}


@ -61,7 +61,7 @@ The use of each configuration item is:
* **port**: This is the `http` service port which enables other application to manage rules by `restful API`.
* **database**: rules are stored in a `sqlite` database, this is the path of the database file (if the file does not exist, the alert application creates it automatically).
* **tdengine**: connection string of `TDEngine` server, note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string.
* **tdengine**: connection string of `TDEngine` server (please refer the documentation of GO connector for the detailed format of this string), note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string.
* **log > level**: log level, could be `production` or `debug`.
* **log > path**: log output file path.
* **receivers > alertManager**: the alert application pushes alerts to `AlertManager` at this URL.
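For orientation, the items above can be assembled into a single configuration sketch. This is a hypothetical example assuming a JSON layout: the field names come from this list, but the values (port, paths, AlertManager URL) and the Go-connector-style `tdengine` connection string are placeholders; the sample configuration shipped with the alert application is the authoritative reference.

```
{
  "port": 8100,
  "database": "/var/lib/taos/alert.db",
  "tdengine": "root:taosdata@/tcp(127.0.0.1:6030)/",
  "log": {
    "level": "production",
    "path": "/var/log/taos/alert.log"
  },
  "receivers": {
    "alertManager": "http://127.0.0.1:9093/api/v1/alerts"
  }
}
```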


@ -58,7 +58,7 @@ $ go build
* **port**:报警监测程序支持使用 `restful API` 对规则进行管理,这个参数用于配置 `http` 服务的侦听端口。
* **database**:报警监测程序将规则保存到了一个 `sqlite` 数据库中,这个参数用于指定数据库文件的路径(不需要提前创建这个文件,如果它不存在,程序会自动创建它)。
* **tdengine**`TDEngine` 的连接字符串,一般来说,数据库名应该在报警规则的 `sql` 语句中指定,所以这个字符串中 **不** 应包含数据库名。
* **tdengine**`TDEngine` 的连接字符串(这个字符串的详细格式说明请见 GO 连接器的文档),一般来说,数据库名应该在报警规则的 `sql` 语句中指定,所以这个字符串中 **不** 应包含数据库名。
* **log > level**:日志的记录级别,可选 `production``debug`
* **log > path**:日志文件的路径。
* **receivers > alertManager**:报警监测程序会将报警推送到 `AlertManager`,在这里指定 `AlertManager` 的接收地址。


@ -84,6 +84,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
case firing && (alert.State == AlertStateWaiting):
alert.StartsAt = time.Now()
alert.EndsAt = time.Time{}
if rule.For.Nanoseconds() > 0 {
alert.State = AlertStatePending
return false
@ -95,6 +96,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
return false
}
alert.StartsAt = alert.StartsAt.Add(rule.For.Duration)
alert.EndsAt = time.Time{}
alert.State = AlertStateFiring
case firing && (alert.State == AlertStateFiring):


@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.6.0")
SET(TD_VER_NUMBER "2.0.8.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -87,6 +87,7 @@ TDengine系统后台服务由taosd提供可以在配置文件taos.cfg里修
- httpPort: RESTful服务使用的端口号所有的HTTP请求TCP都需要向该接口发起查询/写入请求。
- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。
- logDir日志文件目录客户端和服务器的运行日志文件将写入该目录。默认值/var/log/taos。
- tempDir临时文件目录客户端和服务器的临时文件主要是查询时用于保存中间结果的问题将写入该目录。 默认值Linux下为 /tmp/Windows下为环境变量 tmp 或 temp 指向的目录。
- arbitrator系统中裁决器的end point, 缺省值为空。
- rolednode的可选角色。0-any; 既可作为mnode也可分配vnode1-mgmt;只能作为mnode不能分配vnode2-dnode;不能作为mnode只能分配vnode
- debugFlag运行日志开关。131输出错误和警告日志135 输出错误、警告和调试日志143 输出错误、警告、调试和跟踪日志。默认值131或135不同模块有不同的默认值
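The parameters in this list are set in taos.cfg as plain `name value` pairs, one per line. A minimal sketch using only the defaults and formats mentioned above (values are illustrative, not recommendations):

```
# excerpt of a hypothetical taos.cfg
dataDir     /var/lib/taos
logDir      /var/log/taos
tempDir     /tmp/
arbitrator  arbitrator_hostname:6042
role        0
debugFlag   131
```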


@ -34,7 +34,8 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台
- [SQL写入](https://www.taosdata.com/cn/documentation20/insert/#SQL写入)使用SQL insert命令向一张或多张表写入单条或多条记录
- [Telegraf写入](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入)配置Telegraf, 不用任何代码,将采集数据直接写入
- [Prometheus写入](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入)配置Prometheus, 不用任何代码,将数据直接写入
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入)配置EMQ X不用任何代码就可将MQTT数据直接写入
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入)配置EMQ X不用任何代码就可将 MQTT 数据直接写入
- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入):通过 HiveMQ Extension不用任何代码就可将 MQTT 数据直接写入
## [高效查询数据](https://www.taosdata.com/cn/documentation20/queries)


@ -10,7 +10,7 @@ TDengine的模块之一是时序数据库。但除此之外为减少研发的
* __硬件或云服务成本降至1/5__由于超强性能计算资源不到通用大数据方案的1/5通过列式存储和先进的压缩算法存储空间不到通用数据库的1/10。
* __全栈时序数据处理引擎__将数据库、消息队列、缓存、流式计算等功能融为一体应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件大幅降低应用开发和维护的复杂度成本。
* __强大的分析功能__无论是十年前还是一秒钟前的数据指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。
* __与第三方工具无缝连接__不用一行代码即可与Telegraf, Grafana, EMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __与第三方工具无缝连接__不用一行代码即可与Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。
* __零运维成本、零学习成本__安装集群简单快捷无需分库分表实时备份。类似标准SQL支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似零学习成本。
采用TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是因充分利用了物联网时序数据的特点它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。


@ -90,7 +90,7 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
```mysql
ALTER DATABASE db_name REPLICA 2;
```
REPLICA参数是指修改数据库副本数取值范围[1, 3]。在集群中使用副本数必须小于dnode的数目。
REPLICA参数是指修改数据库副本数取值范围[1, 3]。在集群中使用,副本数必须小于或等于dnode的数目。
```mysql
ALTER DATABASE db_name KEEP 365;
@ -844,7 +844,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
- **PERCENTILE**
```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
功能说明:统计表中某列的值百分比分位数。
返回结果数据类型: 双精度浮点数Double。
@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
```
## TAOS SQL 边界限制
- 数据库名最大长度为33
- 表名最大长度为193每行数据最大长度16k个字符
- 列名最大长度为65最多允许1024列最少需要2列第一列必须是时间戳
- 数据库名最大长度为32
- 表名最大长度为192每行数据最大长度16k个字符
- 列名最大长度为64最多允许1024列最少需要2列第一列必须是时间戳
- 标签最多允许128个可以0个标签总长度不超过16k个字符
- SQL语句最大长度65480个字符但可通过系统配置参数maxSQLLength修改最长可配置为1M
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
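Taken together, the statements touched in the hunks above can be exercised with a short session such as the following. The database, table, and column names (`power`, `d1001`, `current`) are borrowed from the smart-meter example elsewhere in this PR and are purely illustrative; note that `REPLICA` may not exceed the number of dnodes, and after this change `PERCENTILE` is documented for normal tables only.

```mysql
ALTER DATABASE power REPLICA 2;
ALTER DATABASE power KEEP 365;
SELECT PERCENTILE(current, 95) FROM d1001;
```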


@ -35,7 +35,7 @@ TDengine相对于通用数据库有超高的压缩比在绝大多数场景
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```
示例1000万台智能电表每台电表每15分钟采集一次数据每次采集的数据128字节那么一年的原始数据量是10000000\*128\*24\*60/15*365 = 44851T。TDengine大概需要消耗44851/5=8970T, 8.9P空间。
示例1000万台智能电表每台电表每15分钟采集一次数据每次采集的数据128字节那么一年的原始数据量是10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。
用户可以通过参数keep设置数据在磁盘中的最大保存时长。为进一步减少存储成本TDengine还提供多级存储最冷的数据可以存放在最廉价的存储介质上应用的访问不用做任何调整只是读取速度降低了。
@ -253,7 +253,7 @@ ALTER USER <user_name> PASS <'password'>;
修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角
```
ALTER USER <user_name> PRIVILEDGE <super|write|read>;
ALTER USER <user_name> PRIVILEGE <super|write|read>;
```
修改用户权限为super/write/read不需要添加单引号
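A hedged example of the corrected syntax (the user name is made up; the password is single-quoted as required above, while the privilege keyword is not quoted):

```mysql
ALTER USER monitor PASS 'StrongPass1';
ALTER USER monitor PRIVILEGE read;
```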


@ -4,16 +4,99 @@
### 物联网典型场景
在典型的物联网、车联网、运维监测场景中,往往有多种不同类型的数据采集设备,采集一个到多个不同的物理量。而同一种采集设备类型,往往又有多个具体的采集设备分布在不同的地点。大数据处理系统就是要将各种采集的数据汇总,然后进行计算和分析。对于同一类设备,其采集的数据都是很规则的。以智能电表为例,假设每个智能电表采集电流、电压、相位三个量,其采集的数据类似如下的表格:
| Device ID | Time Stamp | current | voltage | phase | location | groupId |
| :-------: | :-----------: | :-----: | :-----: | :---: | :--------------: | :-----: |
| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 |
| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | Beijing.Chaoyang | 3 |
| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | Beijing.Haidian | 3 |
| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | Beijing.Haidian | 2 |
| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 |
| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | Beijing.Haidian | 2 |
| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | Beijing.Chaoyang | 3 |
| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 2 |
<figure><table>
<thead><tr>
<th style="text-align:center;">设备ID</th>
<th style="text-align:center;">时间戳</th>
<th style="text-align:center;" colspan="3">采集量</th>
<th style="text-align:center;" colspan="2">标签</th>
</tr>
<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">current</th>
<th style="text-align:center;">voltage</th>
<th style="text-align:center;">phase</th>
<th style="text-align:center;">location</th>
<th style="text-align:center;">groupId</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548696650</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.25</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548696800</td>
<td style="text-align:center;">12.3</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
</tbody>
</table></figure>
<center> 表1智能电表数据示例</center>
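One way to model a table like this in TDengine is a super table whose columns hold the collected metrics and whose tags hold the static attributes. The sketch below is illustrative only: the super table name `meters` and the column types are assumptions, not part of this PR.

```mysql
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
CREATE TABLE d1001 USING meters TAGS ('Beijing.Chaoyang', 2);
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```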
@ -221,7 +304,7 @@ TDengine采用时间驱动缓存管理策略First-In-First-OutFIFO
TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中可以更加快速地响应用户针对最近一条或一批数据的查询分析整体上提供更快的数据库查询响应能力。从这个意义上来说**可通过设置合适的配置参数将TDengine作为数据缓存来使用而不需要再部署Redis或其他额外的缓存系统**可有效地简化系统架构降低运维的成本。需要注意的是TDengine重启以后系统的缓存将被清空之前缓存的数据均会被批量写入磁盘缓存的数据将不会像专门的Key-value缓存系统再将之前缓存的数据重新加载到缓存中。
每个vnode有自己独立的内存而且由多个固定大小的内存块组成不同vnode之间完全隔离。数据写入时类似于日志的写法数据被顺序追加写入内存但每个vnode维护有自己的skip list便于迅速查找。当一以上的内存块写满时启动落盘操作而且后续写的操作在新的内存块进行。这样一个vnode里有一内存块是保留有最近的数据的以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定内存块的大小由配置参数cache决定。
每个vnode有自己独立的内存而且由多个固定大小的内存块组成不同vnode之间完全隔离。数据写入时类似于日志的写法数据被顺序追加写入内存但每个vnode维护有自己的skip list便于迅速查找。当三分之一以上的内存块写满时启动落盘操作而且后续写的操作在新的内存块进行。这样一个vnode里有三分之一内存块是保留有最近的数据的以达到缓存、快速查找的目的。一个vnode的内存块的个数由配置参数blocks决定内存块的大小由配置参数cache决定。
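Both cache parameters are fixed when a database is created. A minimal sketch, assuming the 2.0-era `CACHE` (memory block size in MB) and `BLOCKS` (blocks per vnode) options; with these example values each vnode keeps 6 blocks of 16 MB, and per the paragraph above roughly one third of them always hold the most recently written rows:

```mysql
CREATE DATABASE demo CACHE 16 BLOCKS 6;
```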
### 持久化存储
TDengine采用数据驱动的方式让缓存中的数据写入硬盘进行持久化存储。当vnode中缓存的数据达到一定规模时为了不阻塞后续数据的写入TDengine也会拉起落盘线程将缓存的数据写入持久化存储。TDengine在数据落盘时会打开新的数据库日志文件在落盘成功后则会删除老的数据库日志文件避免日志文件无限制的增长。


@ -616,6 +616,43 @@ HTTP请求URL采用`sqlutc`时返回结果集的时间戳将采用UTC时间
- httpEnableCompress: 是否支持压缩默认不支持目前TDengine仅支持gzip压缩格式
- httpDebugFlag: 日志开关131仅错误和报警信息135调试信息143非常详细的调试信息默认131
## CSharp Connector
在Windows系统上C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作后续版本将提供ORMdapper框架驱动。
#### 安装TDengine客户端
C#连接器需要使用`libtaos.so`和`taos.h`。因此在使用C#连接器之前需在程序运行的Windows环境安装TDengine的Windows客户端以便获得相关驱动文件。
安装完成后,在文件夹`C:/TDengine/examples/C#`中,将会看到两个文件
- TDengineDriver.cs 调用taos.dll文件的Native C方法
- TDengineTest.cs 参考程序示例
在文件夹`C:\Windows\System32`,将会看到`taos.dll`文件
#### 使用方法
- 将C#接口文件TDengineDriver.cs加入到应用程序所在.NET项目中
- 参考TDengineTest.cs来定义数据库连接参数及执行数据插入、查询等操作的方法
- 因为C#接口需要用到`taos.dll`文件,用户可以将`taos.dll`文件加入.NET解决方案中
#### 注意事项
- `taos.dll`文件使用x64平台编译所以.NET项目在生成.exe文件时“解决方案”/“项目”的“平台”请均选择“x64”。
- 此.NET接口目前已经在Visual Studio 2013/2015/2017中验证过其它VS版本尚待验证。
#### 第三方驱动
Maikebing.Data.Taos是一个TDengine的ADO.Net提供器支持linuxwindows。该开发包由热心贡献者`麦壳饼@@maikebing`提供,具体请参考
```
//接口下载
https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
//用法说明
https://www.taosdata.com/blog/2020/11/02/1901.html
```
## Go Connector


@ -38,9 +38,9 @@
6. 检查防火墙设置确认TCP/UDP 端口6030-6042 是打开的
7. 对于Linux上的JDBCODBC, Python, Go等接口类似连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
7. 对于Linux上的JDBCODBC, Python, Go等接口类似连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
8. 对于windows上的JDBC, ODBC, Python, Go等连接确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
8. 对于windows上的JDBC, ODBC, Python, Go等连接确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
9. 如果仍不能排除连接故障请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
检查UDP端口连接是否工作`nc -vuz {hostIP} {port} `


@ -1,6 +1,6 @@
# 高效写入数据
TDengine支持多种接口写入数据包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, CSV文件等后续还将提供Kafka, OPC等接口。数据可以单条插入也可以批量插入可以插入一个数据采集点的数据也可以同时插入多个数据采集点的数据。支持多线程插入支持时间乱序数据插入也支持历史数据插入。
TDengine支持多种接口写入数据包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等后续还将提供Kafka, OPC等接口。数据可以单条插入也可以批量插入可以插入一个数据采集点的数据也可以同时插入多个数据采集点的数据。支持多线程插入支持时间乱序数据插入也支持历史数据插入。
## SQL写入
@ -218,7 +218,15 @@ use telegraf;
select * from cpu;
```
## EMQ X Broker直接写入
MQTT是一流行的物联网数据传输协议[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件无需任何代码只需要在EMQ Dashboard里使用“规则”做简单配置即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDEngine也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。
MQTT是一流行的物联网数据传输协议TDengine 可以很方便的接入 MQTT Broker 接受的数据并写入到 TDengine。
## EMQ Broker 直接写入
[EMQ](https://github.com/emqx/emqx)是一开源的MQTT Broker软件无需任何代码只需要在EMQ Dashboard里使用“规则”做简单配置即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考 [EMQ 官方文档](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine)。
## HiveMQ Broker 直接写入
[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理主要用于企业和新兴的机器到机器M2M通讯和内部传输满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。


@ -20,6 +20,9 @@
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
@ -256,3 +259,5 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1


@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*


@ -156,9 +156,15 @@ build_time=$(date +"%F %R")
# get commint id from git
gitinfo=$(git rev-parse --verify HEAD)
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
if [[ "$verMode" == "cluster" ]]; then
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
cd ${curr_dir}
# 2. cmake executable file
@ -193,23 +199,35 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
fi
echo "====do tar.gz package for all systems===="


@ -2,7 +2,7 @@
%define cfg_install_dir /etc/taos
%define __strip /bin/true
Name: TDengine
Name: tdengine
Version: %{_version}
Release: 3%{?dist}
Summary: tdengine from taosdata
@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
#Scripts executed before installation
@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :

packaging/tools/get_client.sh (new executable file, 21 lines)

@ -0,0 +1,21 @@
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Pleas input the director of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}


@ -312,7 +312,7 @@ function install_data() {
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {


@ -163,7 +163,7 @@ function install_log() {
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {


@ -278,11 +278,11 @@ function install_service_on_sysvinit() {
# Install taosd service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"


@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
@ -110,7 +110,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/


@ -77,7 +77,9 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
@ -135,7 +137,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/


@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
@ -124,7 +124,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector


@ -77,8 +77,10 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
@ -156,7 +158,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector


@ -81,8 +81,10 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
}
function install_bin() {
@ -121,8 +123,11 @@ function install_config() {
echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}"
echo
echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
read firstEp
while true; do
#read firstEp
if exec < /dev/tty; then
read firstEp;
fi
while true; do
if [ ! -z "$firstEp" ]; then
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.6.0'
version: '2.0.8.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.6.0
- usr/lib/libtaos.so.2.0.8.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -20,6 +20,6 @@ ADD_SUBDIRECTORY(tsdb)
ADD_SUBDIRECTORY(wal)
ADD_SUBDIRECTORY(cq)
ADD_SUBDIRECTORY(dnode)
ADD_SUBDIRECTORY(connector/odbc)
#ADD_SUBDIRECTORY(connector/odbc)
ADD_SUBDIRECTORY(connector/jdbc)


@ -490,7 +490,7 @@ static bool balanceMontiorDropping() {
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue;
if (strcmp(pDnode->dnodeEp, dnodeGetMnodeMasterEp()) == 0) continue;
if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
@ -571,8 +571,8 @@ static void balanceCheckDnodeAccess() {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d, last seq:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess);
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
balanceSetVgroupOffline(pDnode);
}
}


@ -62,11 +62,11 @@ typedef struct SLocalReducer {
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;
SColumnModel * resColModel;
SColumnModel* finalModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SFillInfo* pFillInfo; // interpolation support structure
char * pFinalRes; // result data after interpo
tFilePage * discardData;
SResultInfo * pResInfo;
char* pFinalRes; // result data after interpo
tFilePage* discardData;
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
@ -75,7 +75,8 @@ typedef struct SLocalReducer {
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor;
SColumnModel * pFinalColModel; // colModel for final result
SColumnModel* pFinalColModel; // colModel for final result
SColumnModel* pFFColModel;
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
@ -83,9 +84,9 @@ typedef struct SRetrieveSupport {
} SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
SColumnModel **pFinalModel, uint32_t nBufferSize);
SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSize);
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel* pFFModel,
int32_t numOfVnodes);
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
@ -97,7 +98,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SSqlObj* pSql);
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);


@ -23,7 +23,7 @@ extern "C" {
#include "tscUtil.h"
#include "tsclient.h"
void tscFetchDatablockFromSubquery(SSqlObj* pSql);
void tscFetchDatablockForSubquery(SSqlObj* pSql);
void tscSetupOutputColumnIndex(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
@ -39,7 +39,9 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql);
int32_t tscHandleInsertRetry(SSqlObj* pSql);
void tscBuildResFromSubqueries(SSqlObj *pSql);
TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult);
TAOS_ROW doSetResultRowData(SSqlObj *pSql);
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId);
#ifdef __cplusplus
}


@ -75,6 +75,7 @@ typedef struct SJoinSupporter {
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
@ -82,11 +83,12 @@ typedef struct SJoinSupporter {
char* pIdTagList; // result of first stage tags
int32_t totalLen;
int32_t num;
SArray* pVgroupTables;
} SJoinSupporter;
typedef struct SVgroupTableInfo {
SCMVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
} SVgroupTableInfo;
static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
@ -123,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
@ -156,7 +159,7 @@ SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t ind
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
@ -165,15 +168,15 @@ static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
int32_t tscGetResRowLength(SArray* pExprList);
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
@ -215,7 +218,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols);
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@ -224,6 +227,9 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
@ -234,7 +240,7 @@ void tscDoQuery(SSqlObj* pSql);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src);
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
/**
* The create object function must be successful expect for the out of memory issue.
*
@ -262,6 +268,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
@ -275,6 +282,7 @@ int tscSetMgmtEpSetFromCfg(const char *first, const char *second);
bool tscSetSqlOwner(SSqlObj* pSql);
void tscClearSqlOwner(SSqlObj* pSql);
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);


@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
* @param colId
* @return
*/
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* check if the schema is valid or not, including following aspects:
@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema();
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size);
//todo tags value as well as the table id structure needs refactor
char *tsGetTagsValue(STableMeta *pMeta);
#ifdef __cplusplus
}
#endif


@ -30,6 +30,7 @@ extern "C" {
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "tref.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@ -89,12 +90,12 @@ typedef struct STableComInfo {
int32_t rowSize;
} STableComInfo;
typedef struct SCMCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo;
typedef struct SCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCorVgroupInfo;
typedef struct STableMeta {
STableComInfo tableInfo;
@ -102,8 +103,8 @@ typedef struct STableMeta {
int16_t sversion;
int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo;
SVgroupInfo vgroupInfo;
SCorVgroupInfo corVgroupInfo;
STableId id;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
@ -127,7 +128,7 @@ typedef struct STableMetaInfo {
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
SColIndex colInfo;
int64_t uid; // refactor use the pointer
uint64_t uid; // refactor use the pointer
int16_t functionId; // function id in aAgg array
int16_t resType; // return value type
int16_t resBytes; // length of return value
@ -135,6 +136,7 @@ typedef struct SSqlExpr {
int16_t numOfParams; // argument value of each function
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
} SSqlExpr;
typedef struct SColumnIndex {
@ -250,6 +252,7 @@ typedef struct SQueryInfo {
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
} SQueryInfo;
typedef struct {
@ -290,32 +293,32 @@ typedef struct SResRec {
} SResRec;
typedef struct {
int64_t numOfRows; // num of results in current retrieved
int64_t numOfRowsGroup; // num of results of current group
int64_t numOfTotal; // num of total results
int64_t numOfClauseTotal; // num of total result in current subclause
char * pRsp;
int32_t rspType;
int32_t rspLen;
uint64_t qhandle;
int64_t uid;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
int16_t numOfCols;
int16_t precision;
bool completed;
int32_t code;
int32_t numOfGroups;
SResRec * pGroupRec;
char * data;
TAOS_ROW tsrow;
int32_t* length; // length for each field for current row
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex * pColumnIndex;
SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions
int32_t numOfRows; // num of results in current retrieval
int64_t numOfRowsGroup; // num of results of current group
int64_t numOfTotal; // num of total results
int64_t numOfClauseTotal; // num of total result in current subclause
char * pRsp;
int32_t rspType;
int32_t rspLen;
uint64_t qhandle;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
int16_t numOfCols;
int16_t precision;
bool completed;
int32_t code;
int32_t numOfGroups;
SResRec * pGroupRec;
char * data;
TAOS_ROW tsrow;
TAOS_ROW urow;
int32_t* length; // length for each field for current row
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex* pColumnIndex;
struct SLocalReducer *pLocalReducer;
SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalReducer* pLocalReducer;
} SSqlRes;
typedef struct STscObj {
@ -329,6 +332,7 @@ typedef struct STscObj {
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
uint64_t rid; // ref ID returned by taosAddRef
struct SSqlObj * pHb;
struct SSqlObj * sqlList;
struct SSqlStream *streamList;
@ -338,16 +342,16 @@ typedef struct STscObj {
} STscObj;
typedef struct SSubqueryState {
int32_t numOfRemain; // the number of remain unfinished subquery
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
int32_t numOfRemain; // the number of remain unfinished subquery
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState;
typedef struct SSqlObj {
void *signature;
pthread_t owner; // owner of sql object, by which it is executed
STscObj *pTscObj;
void *pRpcCtx;
int64_t rpcRid;
void (*fp)();
void (*fetchFp)();
void *param;
@ -421,6 +425,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
@ -430,14 +435,6 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
*/
void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/**
* free sql object, release allocated resource
* @param pObj
@ -446,7 +443,7 @@ void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeRegisteredSqlObj(void *pSql);
void tscFreeTableMetaHelper(void *pTableMeta);
void tscCloseTscObj(STscObj *pObj);
void tscCloseTscObj(void *pObj);
// todo move to taos? or create a new file: taos_internal.h
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
@ -468,17 +465,17 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);
assert(pInfo->pSqlExpr != NULL);
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
int32_t type = pInfo->field.type;
int32_t bytes = pInfo->field.bytes;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
UNUSED(pData);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
// user defined constant value output columns
if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
@ -516,13 +513,14 @@ extern void * tscQhandle;
extern int tscKeepConn[];
extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern int tscRefId;
extern SRpcCorEpSet tscMgmtEpSet;
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
int32_t tscCompareTidTags(const void* p1, const void* p2);
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
int16_t getNewResColId(SQueryInfo* pQueryInfo);
#ifdef __cplusplus
}


@ -129,6 +129,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp
(JNIEnv *, jobject, jlong, jlong, jobject);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: fetchBlockImp
* Signature: (JJLcom/taosdata/jdbc/TSDBResultSetBlockData;)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp
(JNIEnv *, jobject, jlong, jlong, jobject);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: closeConnectionImp


@ -17,7 +17,6 @@
#include "taos.h"
#include "tlog.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "com_taosdata_jdbc_TSDBJNIConnector.h"
@ -57,6 +56,10 @@ jmethodID g_rowdataSetStringFp;
jmethodID g_rowdataSetTimestampFp;
jmethodID g_rowdataSetByteArrayFp;
jmethodID g_blockdataSetByteArrayFp;
jmethodID g_blockdataSetNumOfRowsFp;
jmethodID g_blockdataSetNumOfColsFp;
#define JNI_SUCCESS 0
#define JNI_TDENGINE_ERROR -1
#define JNI_CONNECTION_NULL -2
@ -66,7 +69,7 @@ jmethodID g_rowdataSetByteArrayFp;
#define JNI_FETCH_END -6
#define JNI_OUT_OF_MEMORY -7
void jniGetGlobalMethod(JNIEnv *env) {
static void jniGetGlobalMethod(JNIEnv *env) {
// make sure init function executed once
switch (atomic_val_compare_exchange_32(&__init, 0, 1)) {
case 0:
@ -114,10 +117,31 @@ void jniGetGlobalMethod(JNIEnv *env) {
g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V");
(*env)->DeleteLocalRef(env, rowdataClass);
jclass blockdataClass = (*env)->FindClass(env, "com/taosdata/jdbc/TSDBResultSetBlockData");
jclass g_blockdataClass = (*env)->NewGlobalRef(env, blockdataClass);
g_blockdataSetByteArrayFp = (*env)->GetMethodID(env, g_blockdataClass, "setByteArray", "(II[B)V");
g_blockdataSetNumOfRowsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfRows", "(I)V");
g_blockdataSetNumOfColsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfCols", "(I)V");
(*env)->DeleteLocalRef(env, blockdataClass);
atomic_store_32(&__init, 2);
jniDebug("native method register finished");
}
static int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
if ((TAOS*) conn == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((TAOS_RES *) res == NULL) {
jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn);
return JNI_RESULT_SET_NULL;
}
return JNI_SUCCESS;
}
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode,
jstring jPath, jboolean jAutoDump) {
if (jPath != NULL) {
@ -192,39 +216,37 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost,
jint jport, jstring jdbName, jstring juser,
jstring jpass) {
jlong ret = 0;
jlong ret = 0;
const char *host = NULL;
const char *dbname = NULL;
const char *user = NULL;
const char *pass = NULL;
const char *dbname = NULL;
if (jhost != NULL) {
host = (*env)->GetStringUTFChars(env, jhost, NULL);
}
if (jdbName != NULL) {
dbname = (*env)->GetStringUTFChars(env, jdbName, NULL);
}
if (juser != NULL) {
user = (*env)->GetStringUTFChars(env, juser, NULL);
}
if (jpass != NULL) {
pass = (*env)->GetStringUTFChars(env, jpass, NULL);
}
if (user == NULL) {
jniDebug("jobj:%p, user is null, use default user %s", jobj, TSDB_DEFAULT_USER);
jniDebug("jobj:%p, user not specified, use default user %s", jobj, TSDB_DEFAULT_USER);
}
if (pass == NULL) {
jniDebug("jobj:%p, pass is null, use default password", jobj);
jniDebug("jobj:%p, pass not specified, use default password", jobj);
}
/*
* set numOfThreadsPerCore = 0
* means only one thread for client side scheduler
*/
tsNumOfThreadsPerCore = 0.0;
ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
@ -233,10 +255,21 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
}
if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host);
if (dbname != NULL) (*env)->ReleaseStringUTFChars(env, jdbName, dbname);
if (user != NULL) (*env)->ReleaseStringUTFChars(env, juser, user);
if (pass != NULL) (*env)->ReleaseStringUTFChars(env, jpass, pass);
if (host != NULL) {
(*env)->ReleaseStringUTFChars(env, jhost, host);
}
if (dbname != NULL) {
(*env)->ReleaseStringUTFChars(env, jdbName, dbname);
}
if (user != NULL) {
(*env)->ReleaseStringUTFChars(env, juser, user);
}
if (pass != NULL) {
(*env)->ReleaseStringUTFChars(env, jpass, pass);
}
return ret;
}
@ -245,64 +278,53 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is already closed", jobj);
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
if (jsql == NULL) {
jniError("jobj:%p, conn:%p, sql is null", jobj, tscon);
jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
return JNI_SQL_NULL;
}
jsize len = (*env)->GetArrayLength(env, jsql);
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
if (dst == NULL) {
jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon);
char *str = (char *) calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
jniDebug("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);
SSqlObj *pSql = taos_query(tscon, dst);
SSqlObj *pSql = taos_query(tscon, str);
int32_t code = taos_errno(pSql);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, tscon, tstrerror(code), taos_errstr(pSql));
} else {
int32_t affectRows = 0;
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(pSql);
int32_t affectRows = taos_affected_rows(pSql);
jniDebug("jobj:%p, conn:%p, code:%s, affect rows:%d", jobj, tscon, tstrerror(code), affectRows);
} else {
jniDebug("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
}
}
free(dst);
return (jlong)pSql;
free(str);
return (jlong) pSql;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return (jint)TSDB_CODE_TSC_INVALID_CONNECTION;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
TAOS_RES *pSql = (TAOS_RES *)tres;
return (jint)taos_errno(pSql);
return (jint)taos_errno((TAOS_RES*) tres);
}
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) {
@ -313,23 +335,16 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
SSqlObj *pSql = (TAOS_RES *)tres;
STscObj *pObj = pSql->pTscObj;
if (tscIsUpdateQuery(pSql)) {
jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, pObj, (void *)tres);
jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, tscon, (void *)tres);
} else {
jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, pObj, (void *)tres);
jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, tscon, (void *)tres);
}
return tres;
@ -337,15 +352,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
SSqlObj *pSql = (TAOS_RES *)tres;
@ -355,37 +364,27 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)res == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
taos_free_result((void *)res);
jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, tscon, (void *)res);
jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)res == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
jint ret = taos_affected_rows((SSqlObj *)res);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret);
return ret;
}
@ -394,27 +393,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
jlong con, jlong res,
jobject arrayListObj) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
TAOS_RES *result = (TAOS_RES *)res;
if (result == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
TAOS_FIELD *fields = taos_fetch_fields(result);
int num_fields = taos_num_fields(result);
// jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, "");
TAOS_RES* tres = (TAOS_RES*) res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t num_fields = taos_num_fields(tres);
if (num_fields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields);
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields);
return JNI_NUM_OF_FIELDS_0;
} else {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields);
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields);
for (int i = 0; i < num_fields; ++i) {
jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp);
(*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type);
@ -457,21 +449,21 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
TAOS_FIELD *fields = taos_fetch_fields(result);
int num_fields = taos_num_fields(result);
if (num_fields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields);
int32_t numOfFields = taos_num_fields(result);
if (numOfFields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields);
return JNI_NUM_OF_FIELDS_0;
}
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
int tserrno = taos_errno(result);
if (tserrno == 0) {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, num_fields);
int code = taos_errno(result);
if (code == TSDB_CODE_SUCCESS) {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, interruptted query", jobj, tscon);
jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
}
@ -480,7 +472,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
for (int i = 0; i < num_fields; i++) {
for (int i = 0; i < numOfFields; i++) {
if (row[i] == NULL) {
continue;
}
@ -534,6 +526,45 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con,
jlong res, jobject rowobj) {
TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
TAOS_RES * tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t numOfFields = taos_num_fields(tres);
assert(numOfFields > 0);
TAOS_ROW row = NULL;
int32_t numOfRows = taos_fetch_block(tres, &row);
if (numOfRows == 0) {
code = taos_errno(tres);
if (code == JNI_SUCCESS) {
jniDebug("jobj:%p, conn:%p, resultset:%p, numOfFields:%d, no data to retrieve", jobj, tscon, (void *)res,
numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, query interrupted", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
}
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfRowsFp, (jint)numOfRows);
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields);
for (int i = 0; i < numOfFields; i++) {
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows,
jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes * numOfRows));
}
return JNI_SUCCESS;
}
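For illustration only (not part of this commit): fetchBlockImp packs each column of the fetched block into a Java byte[] via jniFromNCharToByteArray, which is not shown in this diff. A hedged sketch of such a buffer-to-byte[] conversion; this is an assumption, not the actual implementation:
static jbyteArray bufferToJByteArray(JNIEnv *env, const char *buf, jsize len) {
  jbyteArray arr = (*env)->NewByteArray(env, len);
  if (arr == NULL) {
    return NULL;                                   // OutOfMemoryError is already pending
  }
  (*env)->SetByteArrayRegion(env, arr, 0, len, (const jbyte *)buf);
  return arr;
}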
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionImp(JNIEnv *env, jobject jobj,
jlong con) {
TAOS *tscon = (TAOS *)con;
@ -589,7 +620,6 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEn
jniGetGlobalMethod(env);
TAOS_SUB *tsub = (TAOS_SUB *)sub;
TAOS_RES *res = taos_consume(tsub);
if (res == NULL) {
@ -621,16 +651,16 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
jsize len = (*env)->GetArrayLength(env, jsql);
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
char *str = (char *)calloc(1, sizeof(char) * (len + 1));
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
int code = taos_validate_sql(tscon, dst);
int code = taos_validate_sql(tscon, str);
jniDebug("jobj:%p, conn:%p, code is %d", jobj, tscon, code);
free(dst);
free(str);
return code;
}

View File

@ -91,8 +91,8 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
int32_t sqlLen = (int32_t)strlen(sqlstr);
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_TSC_INVALID_SQL;
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_INVALID_SQL);
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
tscQueueAsyncError(fp, param, terrno);
return;
}
@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
}
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
tscFetchDatablockForSubquery(pSql);
} else {
tscProcessSql(pSql);
}
@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
// handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
tscFetchDatablockForSubquery(pSql);
} else if (pRes->completed) {
if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
} else {
// todo add
}
@ -405,7 +405,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlRes *pRes = &pSql->res;
pRes->code = code;
const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) {
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
goto _error;
@ -428,7 +429,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL);
// param may already have been freed by another routine, and pSql released from tscCache, when ctrl + c is pressed
if (atomic_load_ptr(&pSql->param) == NULL) {
return;
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSql;
@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscResetSqlCmdObj(pCmd, false);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {

File diff suppressed because it is too large

View File

@ -49,82 +49,6 @@ typedef struct SCreateBuilder {
} SCreateBuilder;
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
char buf[512] = {0};
int32_t len = 0;
int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false"));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
return length;
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
break;
default:
len = sprintf(buf, "%d", *(int32_t *)pData);
break;
};
return len;
}
/*
* we need to convert all data into string, so we need to sprintf all kinds of
* non-string data into string, and record its length to get the right
* maximum length. The length may be less or greater than its original binary length:
* For example:
* length((short) 1) == 1, less than sizeof(short)
 * length((uint64_t) 123456789011) > 12, greater than sizeof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
return 0;
}
char * pTagValue = tsGetTagsValue(pMeta);
SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type);
pTagValue += pTagsSchema[0].bytes;
int32_t numOfTags = tscGetNumOfTags(pMeta);
for (int32_t i = 1; i < numOfTags; ++i) {
int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type);
if (len < tLen) {
len = tLen;
}
pTagValue += pTagsSchema[i].bytes;
}
return len;
}
static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
return 0;
}
// the following is handle display tags value for meters created according to metric
char *pTagValue = tsGetTagsValue(pMeta);
// the following handles displaying tag values for tables created from a super table
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
const char *src = "TAG";
STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes);
pTagValue += pSchema[i].bytes;
}
return 0;
@ -241,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false);
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
@ -251,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
typeColLength, false);
-1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE;
@ -261,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
sizeof(int32_t), false);
-1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t);
@ -271,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
noteColLength, false);
-1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE;
return rowLen;
@ -286,10 +207,10 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
const int32_t TYPE_COLUMN_LENGTH = 16;
const int32_t NOTE_COLUMN_MIN_LENGTH = 8;
int32_t noteFieldLen = tscMaxLengthOfTagsFields(pSql);
if (noteFieldLen == 0) {
noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
}
int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql);
// if (noteFieldLen == 0) {
// noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
// }
int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen);
tscFieldInfoUpdateOffset(pQueryInfo);
@ -420,7 +341,7 @@ TAOS_ROW tscFetchRow(void *param) {
return NULL;
}
void* data = doSetResultRowData(pSql, true);
void* data = doSetResultRowData(pSql);
tscClearSqlOwner(pSql);
return data;
@ -486,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
}
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
f.bytes, f.bytes - VARSTR_HEADER_SIZE, false);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes;
@ -501,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false);
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE;
@ -698,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
}
@ -721,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}
@ -731,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = numOfRows; i < totalRows; i++) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}

View File

@ -13,14 +13,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
#include "qAst.h"
#include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "tutil.h"
#include "tscLog.h"
#include "tscLocalMerge.h"
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
@ -97,14 +98,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].nType = pExpr->param[0].nType;
}
SResultInfo *pResInfo = &pReducer->pResInfo[i];
pResInfo->bufLen = pExpr->interBytes;
pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen);
pCtx->resultInfo = &pReducer->pResInfo[i];
pCtx->resultInfo->superTableQ = true;
pCtx->interBufBytes = pExpr->interBytes;
pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo));
pCtx->stableQuery = true;
}
int16_t n = 0;
@ -132,40 +133,53 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
}
static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
int32_t offset = 0;
SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo));
for(int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
pFillCol[i].col.bytes = pExpr->resBytes;
pFillCol[i].col.type = (int8_t)pExpr->resType;
pFillCol[i].col.colId = pExpr->colInfo.colId;
pFillCol[i].flag = pExpr->colInfo.flag;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = pExpr->functionId;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
offset += pExpr->resBytes;
if (pIField->pArithExprInfo == NULL) {
SSqlExpr* pExpr = pIField->pSqlExpr;
pFillCol[i].col.bytes = pExpr->resBytes;
pFillCol[i].col.type = (int8_t)pExpr->resType;
pFillCol[i].col.colId = pExpr->colInfo.colId;
pFillCol[i].flag = pExpr->colInfo.flag;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = pExpr->functionId;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
} else {
pFillCol[i].col.bytes = pIField->field.bytes;
pFillCol[i].col.type = (int8_t)pIField->field.type;
pFillCol[i].col.colId = -100;
pFillCol[i].flag = TSDB_COL_NORMAL;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = -1;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
}
offset += pFillCol[i].col.bytes;
}
return pFillCol;
}
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SSqlObj* pSql) {
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p pMemBuffer is NULL", pMemBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p no local buffer or intermediate result format model", pSql);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
@ -183,7 +197,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
}
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscDebug("%p retrieved no data", pSql);
return;
}
@ -192,7 +206,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
@ -203,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return;
}
@ -227,7 +241,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds == NULL) {
tscError("%p failed to create merge structure", pSql);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -254,7 +268,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx);
taosTFree(ds);
tfree(ds);
continue;
}
@ -264,7 +278,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// no data actually, no need to merge result.
if (idx == 0) {
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -272,7 +286,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SCompareParam *param = malloc(sizeof(SCompareParam));
if (param == NULL) {
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -286,8 +300,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
taosTFree(param);
taosTFree(pReducer);
tfree(param);
tfree(pReducer);
return;
}
@ -320,6 +334,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->resColModel = finalmodel;
pReducer->resColModel->capacity = pReducer->nResultBufSize;
pReducer->finalModel = pFFModel;
assert(pReducer->finalRowSize > 0);
if (pReducer->finalRowSize > 0) {
pReducer->resColModel->capacity /= pReducer->finalRowSize;
@ -330,22 +346,19 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
/*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
taosTFree(pReducer->pTempBuffer);
taosTFree(pReducer->discardData);
taosTFree(pReducer->pResultBuf);
taosTFree(pReducer->pFinalRes);
taosTFree(pReducer->prevRowOfInput);
taosTFree(pReducer->pLoserTree);
taosTFree(param);
taosTFree(pReducer);
tfree(pReducer->pTempBuffer);
tfree(pReducer->discardData);
tfree(pReducer->pResultBuf);
tfree(pReducer->pFinalRes);
tfree(pReducer->prevRowOfInput);
tfree(pReducer->pLoserTree);
tfree(param);
tfree(pReducer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return;
}
size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pTempBuffer->num = 0;
pReducer->pResInfo = calloc(numOfCols, sizeof(SResultInfo));
tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pReducer, pDesc);
@ -373,8 +386,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
}
}
@ -489,47 +502,41 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
}
pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo);
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
if (pCtx->tagInfo.pTagCtxList != NULL) {
taosTFree(pCtx->tagInfo.pTagCtxList);
tfree(pCtx->tagInfo.pTagCtxList);
}
}
taosTFree(pLocalReducer->pCtx);
tfree(pLocalReducer->pCtx);
}
taosTFree(pLocalReducer->prevRowOfInput);
tfree(pLocalReducer->prevRowOfInput);
taosTFree(pLocalReducer->pTempBuffer);
taosTFree(pLocalReducer->pResultBuf);
if (pLocalReducer->pResInfo != NULL) {
size_t num = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < num; ++i) {
taosTFree(pLocalReducer->pResInfo[i].interResultBuf);
}
taosTFree(pLocalReducer->pResInfo);
}
tfree(pLocalReducer->pTempBuffer);
tfree(pLocalReducer->pResultBuf);
if (pLocalReducer->pLoserTree) {
taosTFree(pLocalReducer->pLoserTree->param);
taosTFree(pLocalReducer->pLoserTree);
tfree(pLocalReducer->pLoserTree->param);
tfree(pLocalReducer->pLoserTree);
}
taosTFree(pLocalReducer->pFinalRes);
taosTFree(pLocalReducer->discardData);
tfree(pLocalReducer->pFinalRes);
tfree(pLocalReducer->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel,
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
taosTFree(pLocalReducer->pLocalDataSrc[i]);
tfree(pLocalReducer->pLocalDataSrc[i]);
}
pLocalReducer->numOfBuffer = 0;
@ -563,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
if (numOfGroupByCols > 0) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
// the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
@ -596,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
*pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
taosTFree(orderColIndexList);
tfree(orderColIndexList);
if (*pOrderDesc == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -649,7 +657,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
}
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
SColumnModel **pFinalModel, uint32_t nBufferSizes) {
SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -682,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pSchema[i].bytes = pExpr->resBytes;
pSchema[i].type = (int8_t)pExpr->resType;
tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name));
rlen += pExpr->resBytes;
}
@ -698,7 +708,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pg *= 2;
}
size_t numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups;
size_t numOfSubs = pSql->subState.numOfSub;
assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups);
for (int32_t i = 0; i < numOfSubs; ++i) {
(*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel);
(*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL;
@ -706,7 +717,7 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pSchema);
tfree(pSchema);
return pRes->code;
}
@ -743,8 +754,20 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
}
*pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
taosTFree(pSchema);
memset(pSchema, 0, sizeof(SSchema) * size);
size = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < size; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
pSchema[i].bytes = pField->field.bytes;
pSchema[i].type = pField->field.type;
tstrncpy(pSchema[i].name, pField->field.name, tListLen(pSchema[i].name));
}
*pFFModel = createColumnModel(pSchema, (int32_t) size, capacity);
tfree(pSchema);
return TSDB_CODE_SUCCESS;
}
@ -754,16 +777,18 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
* @param pFinalModel
* @param numOfVnodes
*/
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel *pFFModel,
int32_t numOfVnodes) {
destroyColumnModel(pFinalModel);
destroyColumnModel(pFFModel);
tOrderDescDestroy(pDesc);
for (int32_t i = 0; i < numOfVnodes; ++i) {
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
}
taosTFree(pMemBuffer);
tfree(pMemBuffer);
}
/**
@ -859,17 +884,17 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = pBeforeFillData->num;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t)pBeforeFillData->num;
tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
int32_t prevSize = (int32_t) pBeforeFillData->num;
tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= pQueryInfo->limit.offset;
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
} else {
pQueryInfo->limit.offset -= pRes->numOfRows;
@ -889,7 +914,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
// set remain data to be discarded, and reset the interpolation information
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
@ -923,7 +948,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
while (1) {
int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@ -937,7 +962,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = newRows;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
break;
@ -952,7 +977,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
// all output in current group are completed
int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@ -973,10 +998,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
offset += pField->bytes;
}
pRes->numOfRowsGroup += pRes->numOfRows;
@ -985,10 +1011,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
pBeforeFillData->num = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
taosTFree(pResPages[i]);
tfree(pResPages[i]);
}
taosTFree(pResPages);
tfree(pResPages);
}
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
@ -1071,7 +1097,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
continue;
}
SResultInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
if (maxOutput < pResInfo->numOfRes) {
maxOutput = pResInfo->numOfRes;
}
@ -1229,6 +1255,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
pLocalReducer->finalRowSize = doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize);
}
#ifdef _DEBUG_VIEW
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
@ -1252,10 +1282,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
return true;
}
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
pLocalReducer->pCtx[i].aOutputBuf =
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity;
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
@ -1300,7 +1331,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
}
@ -1329,7 +1360,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) {
doFillResult(pSql, pLocalReducer, true);
}
@ -1500,8 +1531,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else {
// current row does not belongs to the previous group, so it is not be handled yet.
} else { // current row does not belong to the previous group, so it has not been handled yet.
pLocalReducer->hasUnprocessedRow = true;
}
@ -1595,3 +1625,46 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
char* pbuf = calloc(1, pOutput->num * rowSize);
size_t size = tscNumOfFields(pQueryInfo);
SArithmeticSupport arithSup = {0};
// todo refactor
arithSup.offset = 0;
arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
arithSup.exprList = pQueryInfo->exprList;
arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset);
}
int32_t offset = 0;
for (int i = 0; i < size; ++i) {
SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
arithSup.pArithExpr = pSup->pArithExprInfo;
tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
} else {
SSqlExpr* pExpr = pSup->pSqlExpr;
memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num);
}
offset += pSup->field.bytes;
}
assert(finalRowSize <= rowSize);
memcpy(pOutput->data, pbuf, pOutput->num * offset);
tfree(pbuf);
tfree(arithSup.data);
return offset;
}
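For illustration only (not part of this commit): doArithmeticCalculate evaluates the arithmetic output columns over the merged, column-major result buffers and then copies the projected columns back. A minimal sketch of the same columnar post-projection idea, with a plain addition standing in for the expression tree and all names hypothetical:
static void columnarAdd(const double *colA, const double *colB, double *out, int32_t numOfRows) {
  // columns are stored column-major, so one linear pass fills the derived column for every row
  for (int32_t i = 0; i < numOfRows; ++i) {
    out[i] = colA[i] + colB[i];
  }
}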

View File

@ -702,7 +702,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
}
int32_t code = TSDB_CODE_TSC_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
char * tmpTokenBuf = calloc(1, 16*1024); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) {
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
goto _error;
}
str += index;
if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
@ -1406,7 +1410,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
assert(taos_errno(pSql) == code);
taos_free_result(pSql);
taosTFree(pSupporter);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
@ -1445,7 +1449,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *tokenBuf = calloc(1, 4096);
while ((readLen = taosGetline(&line, &n, fp)) != -1) {
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0;
}
@ -1470,7 +1474,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
}
taosTFree(tokenBuf);
tfree(tokenBuf);
free(line);
if (count > 0) {
@ -1483,7 +1487,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} else {
taos_free_result(pSql);
taosTFree(pSupporter);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
@ -1513,7 +1517,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
taosTFree(pSupporter)
tfree(pSupporter)
tscQueueAsyncRes(pSql);
return;

View File

@ -268,7 +268,6 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
if (1) {
// allow user bind param data with different type
short size = 0;
union {
int8_t v1;
int16_t v2;
@ -600,7 +599,7 @@ static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
if ((*bind->length) > (uintptr_t)param->bytes) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
size = (short)*bind->length;
short size = (short)*bind->length;
STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size);
return TSDB_CODE_SUCCESS;
} break;

View File

@ -222,7 +222,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
}
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SCMHeartBeatMsg *pHeartbeat = pMsg;
SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
@ -277,7 +277,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
}
int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) +
sizeof(SCMHeartBeatMsg);
sizeof(SHeartBeatMsg);
pHeartbeat->connId = htonl(pObj->connId);
pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries);
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams);

File diff suppressed because it is too large

View File

@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex)
}
// TODO for large number of columns, employ the binary search method
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) {
@ -130,17 +130,7 @@ SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
return NULL;
}
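For illustration only (not part of this commit): the TODO above suggests a binary search once the column count grows large. A hedged sketch of that idea, assuming the schema array were kept sorted by colId, which the current linear scan does not require:
static SSchema *schemaSearchByColId(SSchema *pSchema, int32_t numOfCols, int16_t colId) {
  int32_t lo = 0, hi = numOfCols - 1;
  while (lo <= hi) {
    int32_t mid = lo + (hi - lo) / 2;
    if (pSchema[mid].colId == colId) {
      return &pSchema[mid];           // found
    } else if (pSchema[mid].colId < colId) {
      lo = mid + 1;
    } else {
      hi = mid - 1;
    }
  }
  return NULL;                        // no column with this id
}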
struct SSchema tscGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo *vgroupInfo) {
static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
corVgroupInfo->version = 0;
corVgroupInfo->inUse = 0;
corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
@ -166,7 +156,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid;
SCMVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
@ -177,7 +167,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pVgroupInfo->epAddr[i].port = pEpMsg->port;
}
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo);
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo);
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
@ -197,28 +187,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
return pTableMeta;
}
/**
* the TableMeta data format in memory is as follows:
*
* +--------------------+
* |STableMeta Body data| sizeof(STableMeta)
* +--------------------+
* |Schema data | numOfTotalColumns * sizeof(SSchema)
* +--------------------+
* |Tags data | tag_col_1.bytes + tag_col_2.bytes + ....
* +--------------------+
*
* @param pTableMeta
* @return
*/
char* tsGetTagsValue(STableMeta* pTableMeta) {
int32_t offset = 0;
// int32_t numOfTotalCols = pTableMeta->numOfColumns + pTableMeta->numOfTags;
// uint32_t offset = sizeof(STableMeta) + numOfTotalCols * sizeof(SSchema);
return ((char*)pTableMeta + offset);
}
// todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {

View File

@ -48,7 +48,7 @@ static int32_t getWaitingTimeInterval(int32_t count) {
return initial * (2<<(count - 2));
}
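Side note: with the shift above, the wait grows geometrically with the retry count — count = 2, 3, 4 yields initial*2, initial*4, initial*8 — while smaller counts are presumably handled by a branch outside this hunk.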
static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
static void tscSetDnodeEpSet(SSqlObj* pSql, SVgroupInfo* pVgroupInfo) {
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
SRpcEpSet* pEpSet = &pSql->epSet;
@ -100,7 +100,7 @@ void tscUpdateMgmtEpSet(SRpcEpSet *pEpSet) {
tscMgmtEpSet.epSet = *pEpSet;
taosCorEndWrite(&tscMgmtEpSet.version);
}
static void tscDumpEpSetFromVgroupInfo(SCMCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) {
static void tscDumpEpSetFromVgroupInfo(SCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) {
if (pVgroupInfo == NULL) { return;}
taosCorBeginRead(&pVgroupInfo->version);
int8_t inUse = pVgroupInfo->inUse;
@ -117,14 +117,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pObj->cmd;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) { return;}
SCMCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;
SCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;
taosCorBeginWrite(&pVgroupInfo->version);
tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse);
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
taosTFree(pVgroupInfo->epAddr[i].fqdn);
tfree(pVgroupInfo->epAddr[i].fqdn);
pVgroupInfo->epAddr[i].fqdn = strndup(pEpSet->fqdn[i], tListLen(pEpSet->fqdn[i]));
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
@ -150,7 +150,7 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
if (pObj == NULL) return;
if (pObj != pObj->signature) {
tscError("heart beat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature);
tscError("heartbeat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature);
return;
}
@ -158,8 +158,8 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
SSqlRes *pRes = &pSql->res;
if (code == 0) {
SCMHeartBeatRsp *pRsp = (SCMHeartBeatRsp *)pRes->pRsp;
SRpcEpSet * epSet = &pRsp->epSet;
SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp;
SRpcEpSet * epSet = &pRsp->epSet;
if (epSet->numOfEps > 0) {
tscEpSetHtons(epSet);
tscUpdateMgmtEpSet(epSet);
@ -175,44 +175,44 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
if (pRsp->streamId) tscKillStream(pObj, htonl(pRsp->streamId));
}
} else {
tscDebug("heartbeat failed, code:%s", tstrerror(code));
tscDebug("%p heartbeat failed, code:%s", pObj->pHb, tstrerror(code));
}
if (pObj->pHb != NULL) {
int32_t waitingDuring = tsShellActivityTimer * 500;
tscDebug("%p start heartbeat in %dms", pSql, waitingDuring);
tscDebug("%p send heartbeat in %dms", pSql, waitingDuring);
taosTmrReset(tscProcessActivityTimer, waitingDuring, pObj, tscTmr, &pObj->pTimer);
taosTmrReset(tscProcessActivityTimer, waitingDuring, (void *)pObj->rid, tscTmr, &pObj->pTimer);
} else {
tscDebug("%p start to close tscObj:%p, not send heartbeat again", pSql, pObj);
}
}
void tscProcessActivityTimer(void *handle, void *tmrId) {
STscObj *pObj = (STscObj *)handle;
if (pObj == NULL || pObj->signature != pObj) {
return;
}
int64_t rid = (int64_t) handle;
STscObj *pObj = taosAcquireRef(tscRefId, rid);
if (pObj == NULL) return;
SSqlObj* pHB = pObj->pHb;
if (pObj->pTimer != tmrId || pHB == NULL) {
return;
}
void** p = taosCacheAcquireByKey(tscObjCache, &pHB, sizeof(TSDB_CACHE_PTR_TYPE));
if (p == NULL) {
tscWarn("%p HB object has been released already", pHB);
taosReleaseRef(tscRefId, pObj->rid);
return;
}
assert(*pHB->self == pHB);
pHB->retry = 0;
int32_t code = tscProcessSql(pHB);
taosCacheRelease(tscObjCache, (void**) &p, false);
if (code != TSDB_CODE_SUCCESS) {
tscError("%p failed to sent HB to server, reason:%s", pHB, tstrerror(code));
}
taosReleaseRef(tscRefId, rid);
}
int tscSendMsgToServer(SSqlObj *pSql) {
@ -237,15 +237,11 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.pCont = pMsg,
.contLen = pSql->cmd.payloadLen,
.ahandle = pSql,
.handle = &pSql->pRpcCtx,
.handle = NULL,
.code = 0
};
// NOTE: the rpc context should be acquired before sending data to server.
// Otherwise, the pSql object may have been released already during the response function, which is
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
// cause crash.
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid);
return TSDB_CODE_SUCCESS;
}
@ -265,7 +261,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pSql->cmd;
assert(*pSql->self == pSql);
pSql->pRpcCtx = NULL;
pSql->rpcRid = -1;
if (pObj->signature != pObj) {
tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature);
@ -361,7 +357,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
memcpy(pRes->pRsp, rpcMsg->pCont, pRes->rspLen);
}
} else {
pRes->pRsp = NULL;
tfree(pRes->pRsp);
}
/*
@ -481,14 +477,25 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t vgIndex = pTableMetaInfo->vgroupIndex;
if (pTableMetaInfo->pVgroupTables == NULL) {
SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList;
assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex);
} else {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(vgIndex >= 0 && vgIndex < numOfVgroups);
pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex);
}
} else {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId);
}
pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg);
@ -539,10 +546,27 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs);
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2);
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + 4096;
int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? pQueryInfo->tsBuf->fileSize : 0;
int32_t tableSerialize = 0;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pTableMetaInfo->pVgroupTables != NULL) {
size_t numOfGroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables);
int32_t totalTables = 0;
for (int32_t i = 0; i < numOfGroups; ++i) {
SVgroupTableInfo *pTableInfo = taosArrayGet(pTableMetaInfo->pVgroupTables, i);
totalTables += (int32_t) taosArrayGetSize(pTableInfo->itemList);
}
tableSerialize = totalTables * sizeof(STableIdInfo);
}
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
tableSerialize + 4096;
}
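Side note: the revised estimate doubles the per-expression allowance (numOfExprs * 2), presumably to leave room for the second-stage expression list that tscBuildQueryMsg now serializes as well, and it additionally accounts for the ts-buffer file size, the serialized table-id list, and a fixed 4096-byte headroom.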
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
@ -552,7 +576,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) {
SCMVgroupInfo* pVgroupInfo = NULL;
SVgroupInfo* pVgroupInfo = NULL;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
@ -662,19 +686,19 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
pQueryMsg->queryType = htonl(pQueryInfo->type);
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput);
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
// set column list ids
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
@ -736,12 +760,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
assert(pExpr->resColId < 0);
pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId);
pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag);
pSqlFuncExpr->functionId = htons(pExpr->functionId);
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
pSqlFuncExpr->resColId = htons(pExpr->resColId);
pMsg += sizeof(SSqlFuncMsg);
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
@ -760,6 +787,73 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSqlFuncExpr = (SSqlFuncMsg *)pMsg;
}
size_t output = tscNumOfFields(pQueryInfo);
if (tscIsSecondStageQuery(pQueryInfo)) {
pQueryMsg->secondStageOutput = htonl((int32_t) output);
SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
for (int32_t i = 0; i < output; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
SSqlExpr *pExpr = pField->pSqlExpr;
if (pExpr != NULL) {
if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
tscError("%p table schema is not matched with parsed sql", pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId);
pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag);
pSqlFuncExpr1->functionId = htons(pExpr->functionId);
pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams);
pMsg += sizeof(SSqlFuncMsg);
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
// todo add log
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen);
if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
pMsg += pExpr->param[j].nLen;
} else {
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key);
}
}
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
} else {
assert(pField->pArithExprInfo != NULL);
SExprInfo* pExprInfo = pField->pArithExprInfo;
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId);
pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams);
pMsg += sizeof(SSqlFuncMsg);
for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
// todo add log
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType);
pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes);
if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) {
memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes);
pMsg += pExprInfo->base.arg[j].argBytes;
} else {
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64);
}
}
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
}
}
} else {
pQueryMsg->secondStageOutput = 0;
}
// serialize the table info (sid, uid, tags)
pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg);
@ -786,7 +880,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
*((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]);
pMsg += sizeof(pQueryInfo->fillVal[0]);
}
@ -846,41 +940,25 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// compressed ts block
pQueryMsg->tsOffset = htonl((int32_t)(pMsg - pCmd->payload));
int32_t tsLen = 0;
int32_t numOfBlocks = 0;
if (pQueryInfo->tsBuf != NULL) {
STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pQueryInfo->tsBuf, pTableMetaInfo->vgroupIndex);
assert(QUERY_IS_JOIN_QUERY(pQueryInfo->type) && pBlockInfo != NULL); // this query should not be sent
// todo refactor
if (fseek(pQueryInfo->tsBuf->f, pBlockInfo->offset, SEEK_SET) != 0) {
int code = TAOS_SYSTEM_ERROR(ferror(pQueryInfo->tsBuf->f));
tscError("%p: fseek failed: %s", pSql, tstrerror(code));
// note: here used the index instead of actual vnode id.
int32_t vnodeIndex = pTableMetaInfo->vgroupIndex;
int32_t code = dumpFileBlockByGroupId(pQueryInfo->tsBuf, vnodeIndex, pMsg, &pQueryMsg->tsLen, &pQueryMsg->tsNumOfBlocks);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
size_t s = fread(pMsg, 1, pBlockInfo->compLen, pQueryInfo->tsBuf->f);
if (s != pBlockInfo->compLen) {
int code = TAOS_SYSTEM_ERROR(ferror(pQueryInfo->tsBuf->f));
tscError("%p: fread didn't return expected data: %s", pSql, tstrerror(code));
return code;
}
pMsg += pQueryMsg->tsLen;
pMsg += pBlockInfo->compLen;
tsLen = pBlockInfo->compLen;
numOfBlocks = pBlockInfo->numOfBlocks;
}
pQueryMsg->tsLen = htonl(tsLen);
pQueryMsg->tsNumOfBlocks = htonl(numOfBlocks);
if (pQueryInfo->tsBuf != NULL) {
pQueryMsg->tsOrder = htonl(pQueryInfo->tsBuf->tsOrder);
pQueryMsg->tsLen = htonl(pQueryMsg->tsLen);
pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks);
}
int32_t msgLen = (int32_t)(pMsg - pCmd->payload);
tscDebug("%p msg built success,len:%d bytes", pSql, msgLen);
tscDebug("%p msg built success, len:%d bytes", pSql, msgLen);
pCmd->payloadLen = msgLen;
pSql->cmd.msgType = TSDB_MSG_TYPE_QUERY;
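
The hunk above replaces the manual fseek/fread of the compressed timestamp block with a single dumpFileBlockByGroupId() call keyed by the vgroup index. A minimal sketch of what such a helper plausibly does, reconstructed from the removed code (the real implementation lives in the ts-buffer module and may differ):

// Illustrative sketch only, inferred from the removed fseek/fread logic above.
// STSBuf, STSVnodeBlockInfo and tsBufGetVnodeBlockInfo are taken from the surrounding code.
static int32_t dumpFileBlockByGroupIdSketch(STSBuf *pTSBuf, int32_t groupIndex, void *buf,
                                            int32_t *len, int32_t *numOfBlocks) {
  STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, groupIndex);
  *len = 0;
  *numOfBlocks = 0;

  if (fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET) != 0) {
    return TAOS_SYSTEM_ERROR(ferror(pTSBuf->f));   // seek to this group's block section
  }

  if (fread(buf, 1, pBlockInfo->compLen, pTSBuf->f) != (size_t)pBlockInfo->compLen) {
    return TAOS_SYSTEM_ERROR(ferror(pTSBuf->f));   // copy the compressed ts data into the message
  }

  *len = pBlockInfo->compLen;
  *numOfBlocks = pBlockInfo->numOfBlocks;
  return TSDB_CODE_SUCCESS;
}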
@ -892,10 +970,10 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCreateDbMsg);
pCmd->payloadLen = sizeof(SCreateDbMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DB;
SCMCreateDbMsg *pCreateDbMsg = (SCMCreateDbMsg*)pCmd->payload;
SCreateDbMsg *pCreateDbMsg = (SCreateDbMsg *)pCmd->payload;
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
@ -906,13 +984,13 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCreateDnodeMsg);
pCmd->payloadLen = sizeof(SCreateDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMCreateDnodeMsg *pCreate = (SCMCreateDnodeMsg *)pCmd->payload;
SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload;
strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE;
@ -922,13 +1000,13 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCreateAcctMsg);
pCmd->payloadLen = sizeof(SCreateAcctMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMCreateAcctMsg *pAlterMsg = (SCMCreateAcctMsg *)pCmd->payload;
SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload;
SStrToken *pName = &pInfo->pDCLInfo->user.user;
SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
@ -967,14 +1045,14 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCreateUserMsg);
pCmd->payloadLen = sizeof(SCreateUserMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMCreateUserMsg *pAlterMsg = (SCMCreateUserMsg*)pCmd->payload;
SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload;
SUserInfo *pUser = &pInfo->pDCLInfo->user;
strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n);
@ -999,21 +1077,21 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildCfgDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMCfgDnodeMsg);
pCmd->payloadLen = sizeof(SCfgDnodeMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_CONFIG_DNODE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropDbMsg);
pCmd->payloadLen = sizeof(SDropDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMDropDbMsg *pDropDbMsg = (SCMDropDbMsg*)pCmd->payload;
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db));
@ -1043,13 +1121,13 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropDnodeMsg);
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMDropDnodeMsg *pDrop = (SCMDropDnodeMsg *)pCmd->payload;
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
@ -1059,7 +1137,7 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropUserMsg);
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
@ -1067,7 +1145,7 @@ int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMDropUserMsg *pDropMsg = (SCMDropUserMsg*)pCmd->payload;
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
@ -1076,7 +1154,7 @@ int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMDropUserMsg);
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
@ -1084,7 +1162,7 @@ int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMDropUserMsg *pDropMsg = (SCMDropUserMsg*)pCmd->payload;
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
@ -1093,14 +1171,14 @@ int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMUseDbMsg);
pCmd->payloadLen = sizeof(SUseDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMUseDbMsg *pUseDbMsg = (SCMUseDbMsg*)pCmd->payload;
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pUseDbMsg->db, pTableMetaInfo->name);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
@ -1112,14 +1190,14 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_SHOW;
pCmd->payloadLen = sizeof(SCMShowMsg) + 100;
pCmd->payloadLen = sizeof(SShowMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMShowMsg *pShowMsg = (SCMShowMsg*)pCmd->payload;
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
size_t nameLen = strlen(pTableMetaInfo->name);
@ -1146,13 +1224,13 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pShowMsg->payloadLen = htons(pEpAddr->n);
}
pCmd->payloadLen = sizeof(SCMShowMsg) + pShowMsg->payloadLen;
pCmd->payloadLen = sizeof(SShowMsg) + pShowMsg->payloadLen;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMKillQueryMsg);
pCmd->payloadLen = sizeof(SKillQueryMsg);
switch (pCmd->command) {
case TSDB_SQL_KILL_QUERY:
@ -1264,8 +1342,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
return minMsgSize() + sizeof(SCMAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) +
TSDB_EXTRA_PAYLOAD_SIZE;
return minMsgSize() + sizeof(SAlterTableMsg) + sizeof(SSchema) * tscNumOfFields(pQueryInfo) + TSDB_EXTRA_PAYLOAD_SIZE;
}
int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
@ -1284,7 +1361,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
@ -1333,10 +1410,10 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMAlterDbMsg);
pCmd->payloadLen = sizeof(SAlterDbMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_DB;
SCMAlterDbMsg *pAlterDbMsg = (SCMAlterDbMsg*)pCmd->payload;
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
@ -1361,19 +1438,6 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
static int tscSetResultPointer(SQueryInfo *pQueryInfo, SSqlRes *pRes) {
if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
return pRes->code;
}
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
pRes->tsrow[i] = (unsigned char*)((char*) pRes->data + offset * pRes->numOfRows);
}
return 0;
}
/*
* this function can only be called once.
* by using pRes->rspType to denote its status
@ -1384,15 +1448,18 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pRes->code = TSDB_CODE_SUCCESS;
if (pRes->rspType == 0) {
pRes->numOfRows = numOfRes;
pRes->row = 0;
pRes->rspType = 1;
tscSetResultPointer(pQueryInfo, pRes);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscCreateResPointerInfo(pRes, pQueryInfo) != TSDB_CODE_SUCCESS) {
return pRes->code;
}
tscSetResRawPtr(pRes, pQueryInfo);
} else {
tscResetForNextRetrieve(pRes);
}
@ -1436,10 +1503,11 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
}
pRes->code = tscDoLocalMerge(pSql);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pRes->code == TSDB_CODE_SUCCESS && pRes->numOfRows > 0) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tscCreateResPointerInfo(pRes, pQueryInfo);
tscSetResRawPtr(pRes, pQueryInfo);
}
pRes->row = 0;
@ -1461,14 +1529,14 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_CM_CONNECT;
pCmd->payloadLen = sizeof(SCMConnectMsg);
pCmd->payloadLen = sizeof(SConnectMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMConnectMsg *pConnect = (SCMConnectMsg*)pCmd->payload;
SConnectMsg *pConnect = (SConnectMsg*)pCmd->payload;
// TODO refactor full_name
char *db; // ugly code to move the space
@ -1490,11 +1558,11 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SCMTableInfoMsg* pInfoMsg = (SCMTableInfoMsg *)pCmd->payload;
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
strcpy(pInfoMsg->tableId, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char* pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
size_t len = htonl(pCmd->tagData.dataLen);
if (pSql->cmd.autoCreated) {
@ -1513,7 +1581,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
/**
* multi table meta req pkg format:
* | SMgmtHead | SCMMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ......
* | SMgmtHead | SMultiTableInfoMsg | tableId0 | tableId1 | tableId2 | ......
* no used 4B
**/
int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
@ -1531,16 +1599,16 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SMgmtHead *pMgmt = (SMgmtHead *)(pCmd->payload + tsRpcHeadSize);
memset(pMgmt->db, 0, TSDB_TABLE_FNAME_LEN); // server don't need the db
SCMMultiTableInfoMsg *pInfoMsg = (SCMMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead));
SMultiTableInfoMsg *pInfoMsg = (SMultiTableInfoMsg *)(pCmd->payload + tsRpcHeadSize + sizeof(SMgmtHead));
pInfoMsg->numOfTables = htonl((int32_t)pCmd->count);
if (pCmd->payloadLen > 0) {
memcpy(pInfoMsg->tableIds, tmpData, pCmd->payloadLen);
}
taosTFree(tmpData);
tfree(tmpData);
pCmd->payloadLen += sizeof(SMgmtHead) + sizeof(SCMMultiTableInfoMsg);
pCmd->payloadLen += sizeof(SMgmtHead) + sizeof(SMultiTableInfoMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_TABLES_META;
assert(pCmd->payloadLen + minMsgSize() <= pCmd->allocSize);
@ -1586,11 +1654,11 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
char* pMsg = pCmd->payload;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SCMSTableVgroupMsg *pStableVgroupMsg = (SCMSTableVgroupMsg *) pMsg;
SSTableVgroupMsg *pStableVgroupMsg = (SSTableVgroupMsg *)pMsg;
pStableVgroupMsg->numOfTables = htonl(pQueryInfo->numOfTables);
pMsg += sizeof(SCMSTableVgroupMsg);
pMsg += sizeof(SSTableVgroupMsg);
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
size_t size = sizeof(pTableMetaInfo->name);
tstrncpy(pMsg, pTableMetaInfo->name, size);
@ -1623,14 +1691,17 @@ int tscBuildHeartBeatMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
numOfStreams++;
}
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SCMHeartBeatMsg) + 100;
int size = numOfQueries * sizeof(SQueryDesc) + numOfStreams * sizeof(SStreamDesc) + sizeof(SHeartBeatMsg) + 100;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
pthread_mutex_unlock(&pObj->mutex);
tscError("%p failed to malloc for heartbeat msg", pSql);
tscError("%p failed to create heartbeat msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCMHeartBeatMsg *pHeartbeat = (SCMHeartBeatMsg *)pCmd->payload;
// TODO the expired hb and client can not be identified by server till now.
SHeartBeatMsg *pHeartbeat = (SHeartBeatMsg *)pCmd->payload;
tstrncpy(pHeartbeat->clientVer, version, tListLen(pHeartbeat->clientVer));
pHeartbeat->numOfQueries = numOfQueries;
pHeartbeat->numOfStreams = numOfStreams;
@ -1705,7 +1776,6 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name,
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
// todo handle out of memory case
if (pTableMetaInfo->pTableMeta == NULL) {
free(pTableMeta);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1719,7 +1789,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
/**
* multi table meta rsp pkg format:
* | STaosRsp | ieType | SCMMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2
* | STaosRsp | ieType | SMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2
* |...... 1B 1B 4B
**/
int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
@ -1736,9 +1806,9 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
rsp++;
SCMMultiTableInfoMsg *pInfo = (SCMMultiTableInfoMsg *)rsp;
SMultiTableInfoMsg *pInfo = (SMultiTableInfoMsg *)rsp;
totalNum = htonl(pInfo->numOfTables);
rsp += sizeof(SCMMultiTableInfoMsg);
rsp += sizeof(SMultiTableInfoMsg);
for (i = 0; i < totalNum; i++) {
SMultiTableMeta *pMultiMeta = (SMultiTableMeta *)rsp;
@ -1830,9 +1900,9 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
SSqlRes* pRes = &pSql->res;
// NOTE: the order of several table must be preserved.
SCMSTableVgroupRspMsg *pStableVgroup = (SCMSTableVgroupRspMsg *)pRes->pRsp;
SSTableVgroupRspMsg *pStableVgroup = (SSTableVgroupRspMsg *)pRes->pRsp;
pStableVgroup->numOfTables = htonl(pStableVgroup->numOfTables);
char* pMsg = pRes->pRsp + sizeof(SCMSTableVgroupRspMsg);
char *pMsg = pRes->pRsp + sizeof(SSTableVgroupRspMsg);
// master sqlObj locates in param
SSqlObj* parent = pSql->param;
@ -1845,18 +1915,18 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
SVgroupsMsg * pVgroupMsg = (SVgroupsMsg *) pMsg;
pVgroupMsg->numOfVgroups = htonl(pVgroupMsg->numOfVgroups);
size_t size = sizeof(SCMVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg);
size_t size = sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg);
size_t vgroupsz = sizeof(SCMVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo);
pInfo->vgroupList = calloc(1, vgroupsz);
assert(pInfo->vgroupList != NULL);
pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
//just init, no need to lock
SCMVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
SCMVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j];
pVgroups->vgId = htonl(vmsg->vgId);
pVgroups->numOfEps = vmsg->numOfEps;
@ -1878,10 +1948,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
* current process do not use the cache at all
*/
int tscProcessShowRsp(SSqlObj *pSql) {
STableMetaMsg * pMetaMsg;
SCMShowRsp *pShow;
SSchema * pSchema;
char key[20];
STableMetaMsg *pMetaMsg;
SShowRsp * pShow;
SSchema * pSchema;
char key[20];
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
@ -1890,7 +1960,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
pShow = (SCMShowRsp *)pRes->pRsp;
pShow = (SShowRsp *)pRes->pRsp;
pShow->qhandle = htobe64(pShow->qhandle);
pRes->qhandle = pShow->qhandle;
@ -1937,13 +2007,13 @@ int tscProcessShowRsp(SSqlObj *pSql) {
SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index,
pTableSchema[i].type, pTableSchema[i].bytes, pTableSchema[i].bytes, false);
pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false);
}
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
tscFieldInfoUpdateOffset(pQueryInfo);
taosTFree(pTableMeta);
tfree(pTableMeta);
return 0;
}
@ -1968,7 +2038,7 @@ static void createHBObj(STscObj* pObj) {
pSql->cmd.command = pQueryInfo->command;
if (TSDB_CODE_SUCCESS != tscAllocPayload(&(pSql->cmd), TSDB_DEFAULT_PAYLOAD_SIZE)) {
taosTFree(pSql);
tfree(pSql);
return;
}
@ -1983,11 +2053,12 @@ static void createHBObj(STscObj* pObj) {
}
int tscProcessConnectRsp(SSqlObj *pSql) {
char temp[TSDB_TABLE_FNAME_LEN * 2];
STscObj *pObj = pSql->pTscObj;
SSqlRes *pRes = &pSql->res;
SCMConnectRsp *pConnect = (SCMConnectRsp *)pRes->pRsp;
char temp[TSDB_TABLE_FNAME_LEN * 2] = {0};
SConnectRsp *pConnect = (SConnectRsp *)pRes->pRsp;
tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId)); // copy acctId from response
int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
@ -2005,7 +2076,9 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pObj->connId = htonl(pConnect->connId);
createHBObj(pObj);
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
//launch a timer to send heartbeat to maintain the connection and send status to mnode
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, (void *)pObj->rid, tscTmr, &pObj->pTimer);
return 0;
}
@ -2115,6 +2188,15 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
return pRes->code;
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pCmd->command == TSDB_SQL_RETRIEVE) {
tscSetResRawPtr(pRes, pQueryInfo);
} else if ((UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_SUBQUERY)) {
tscSetResRawPtr(pRes, pQueryInfo);
} else if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) {
tscSetResRawPtr(pRes, pQueryInfo);
}
if (pSql->pSubscription != NULL) {
int32_t numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
@ -2136,7 +2218,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
pRes->row = 0;
tscDebug("%p numOfRows:%" PRId64 ", offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
return 0;
}
@ -2271,7 +2353,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}
if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {

View File

@ -28,7 +28,6 @@
#include "tutil.h"
#include "ttimer.h"
#include "tscProfile.h"
#include "ttimer.h"
static bool validImpl(const char* str, size_t maxsize) {
if (str == NULL) {
@ -161,6 +160,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
registerSqlObj(pSql);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
pObj->rid = taosAddRef(tscRefId, pObj);
return pSql;
}
@ -278,9 +278,9 @@ void taos_close(TAOS *taos) {
SSqlObj* pHb = pObj->pHb;
if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) {
if (pHb->pRpcCtx != NULL) { // wait for rsp from dnode
rpcCancelRequest(pHb->pRpcCtx);
pHb->pRpcCtx = NULL;
if (pHb->rpcRid > 0) { // wait for rsp from dnode
rpcCancelRequest(pHb->rpcRid);
pHb->rpcRid = -1;
}
tscDebug("%p HB is freed", pHb);
@ -296,7 +296,8 @@ void taos_close(TAOS *taos) {
}
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
tscCloseTscObj(pObj);
taosRemoveRef(tscRefId, pObj->rid);
}
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
@ -320,7 +321,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES
if (sqlLen > (uint32_t)tsMaxSQLStringLen) {
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_TSC_INVALID_SQL;
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
return NULL;
}
@ -393,7 +394,7 @@ int taos_affected_rows(TAOS_RES *tres) {
SSqlObj* pSql = (SSqlObj*) tres;
if (pSql == NULL || pSql->signature != pSql) return 0;
return (int)(pSql->res.numOfRows);
return pSql->res.numOfRows;
}
TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
@ -419,7 +420,16 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(pFieldInfo, i);
if (pField->visible) {
f[j++] = pField->field;
f[j] = pField->field;
// revise the length for binary and nchar fields
if (f[j].type == TSDB_DATA_TYPE_BINARY) {
f[j].bytes -= VARSTR_HEADER_SIZE;
} else if (f[j].type == TSDB_DATA_TYPE_NCHAR) {
f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE;
}
j += 1;
}
}
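
The added branch reports user-visible column widths rather than internal storage widths: variable-length columns carry a VARSTR length header, and NCHAR is stored as UCS-4. A short worked example under the usual assumptions (VARSTR_HEADER_SIZE == 2, TSDB_NCHAR_SIZE == 4; these sizes are assumed, not confirmed by this diff):

// BINARY(20): stored as 2 + 20     = 22 bytes -> reported bytes = 22 - 2        = 20
// NCHAR(10):  stored as 2 + 10 * 4 = 42 bytes -> reported bytes = (42 - 2) / 4  = 10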
@ -442,50 +452,30 @@ int taos_retrieve(TAOS_RES *res) {
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
return (int)pRes->numOfRows;
tscProcessSql(pSql);
return pRes->numOfRows;
}
int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
static bool needToFetchNewBlock(SSqlObj* pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
if (pRes->qhandle == 0 || pSql->signature != pSql) {
*rows = NULL;
return 0;
}
// Retrieve new block
tscResetForNextRetrieve(pRes);
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
if (pRes->numOfRows == 0) {
*rows = NULL;
return 0;
}
// secondary merge has handle this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) {
pRes->numOfClauseTotal += pRes->numOfRows;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if (pQueryInfo == NULL)
return 0;
assert(0);
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
}
*rows = pRes->tsrow;
return (int)((pQueryInfo->order.order == TSDB_ORDER_DESC) ? pRes->numOfRows : -pRes->numOfRows);
return (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
pCmd->command == TSDB_SQL_CURRENT_DB ||
pCmd->command == TSDB_SQL_SERV_VERSION ||
pCmd->command == TSDB_SQL_CLI_VERSION ||
pCmd->command == TSDB_SQL_CURRENT_USER);
}
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
@ -508,77 +498,50 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
// set the sql object owner
tscSetSqlOwner(pSql);
// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
pCmd->command == TSDB_SQL_CURRENT_DB ||
pCmd->command == TSDB_SQL_SERV_VERSION ||
pCmd->command == TSDB_SQL_CLI_VERSION ||
pCmd->command == TSDB_SQL_CURRENT_USER )) {
// current data set are exhausted, fetch more result from node
if (pRes->row >= pRes->numOfRows && needToFetchNewBlock(pSql)) {
taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj);
tsem_wait(&pSql->rspSem);
}
void* data = doSetResultRowData(pSql, true);
void* data = doSetResultRowData(pSql);
tscClearSqlOwner(pSql);
return data;
}
int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
#if 0
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
int nRows = 0;
if (pSql == NULL || pSql->signature != pSql) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
*rows = NULL;
return 0;
}
// projection query on metric, pipeline retrieve data from vnode list,
// instead of two-stage merge, dnodeProcessMsgFromShell free qhandle
nRows = taos_fetch_block_impl(res, rows);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pSql->cmd.command = pQueryInfo->command;
pCmd->clauseIndex++;
pRes->numOfTotal += pRes->numOfClauseTotal;
pRes->numOfClauseTotal = 0;
pRes->rspType = 0;
pSql->subState.numOfSub = 0;
taosTFree(pSql->pSubs);
assert(pSql->fp == NULL);
tscDebug("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
if (pRes->qhandle == 0 ||
pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
return 0;
}
return nRows;
#endif
tscResetForNextRetrieve(pRes);
(*rows) = taos_fetch_row(res);
return ((*rows) != NULL)? 1:0;
// set the sql object owner
tscSetSqlOwner(pSql);
// current data set are exhausted, fetch more data from node
if (needToFetchNewBlock(pSql)) {
taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj);
tsem_wait(&pSql->rspSem);
}
*rows = pRes->urow;
tscClearSqlOwner(pSql);
return pRes->numOfRows;
}
int taos_select_db(TAOS *taos, const char *db) {
@ -599,7 +562,7 @@ int taos_select_db(TAOS *taos, const char *db) {
}
// send free message to vnode to free qhandle and corresponding resources in vnode
static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
static bool tscKillQueryInDnode(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@ -746,9 +709,9 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
assert(pSubObj->self == (SSqlObj**) p);
pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
if (pSubObj->pRpcCtx != NULL) {
rpcCancelRequest(pSubObj->pRpcCtx);
pSubObj->pRpcCtx = NULL;
if (pSubObj->rpcRid > 0) {
rpcCancelRequest(pSubObj->rpcRid);
pSubObj->rpcRid = -1;
}
tscQueueAsyncRes(pSubObj);
@ -773,7 +736,7 @@ void taos_stop_query(TAOS_RES *res) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
assert(pSql->pRpcCtx == NULL);
assert(pSql->rpcRid <= 0);
tscKillSTableQuery(pSql);
} else {
if (pSql->cmd.command < TSDB_SQL_LOCAL) {
@ -782,9 +745,9 @@ void taos_stop_query(TAOS_RES *res) {
* reset and freed in the processMsgFromServer function, and causes the invalid
* write problem for rpcCancelRequest.
*/
if (pSql->pRpcCtx != NULL) {
rpcCancelRequest(pSql->pRpcCtx);
pSql->pRpcCtx = NULL;
if (pSql->rpcRid > 0) {
rpcCancelRequest(pSql->rpcRid);
pSql->rpcRid = -1;
}
tscQueueAsyncRes(pSql);
@ -794,6 +757,25 @@ void taos_stop_query(TAOS_RES *res) {
tscDebug("%p query is cancelled", res);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) {
return true;
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
return true;
}
SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, col);
if (col < 0 || col >= tscNumOfFields(pQueryInfo) || row < 0 || row > pSql->res.numOfRows) {
return true;
}
return isNull(((char*) pSql->res.urow[col]) + row * pInfo->field.bytes, pInfo->field.type);
}
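
Together with taos_fetch_row()/taos_fetch_block() above, the new taos_is_null() lets callers probe individual cells of the current result block. A hedged sketch of a plain row-wise fetch loop using the public C API (connection details and the query string are placeholders, error handling is trimmed):

#include <stdio.h>
#include "taos.h"   // TDengine client header

// Sketch only: the "demo" database, the meters table and the connection details are hypothetical.
void dump_query(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "demo", 0);
  TAOS_RES *res = taos_query(conn, "select ts, current from meters limit 10");

  int ncols = taos_num_fields(res);
  TAOS_FIELD *fields = taos_fetch_fields(res);

  TAOS_ROW row;
  char line[1024];
  while ((row = taos_fetch_row(res)) != NULL) {   // fetches a new block from the server when needed
    taos_print_row(line, row, fields, ncols);
    printf("%s\n", line);
  }

  taos_free_result(res);
  taos_close(conn);
}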
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0;
for (int i = 0; i < num_fields; ++i) {
@ -891,18 +873,16 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
int32_t sqlLen = (int32_t)strlen(sql);
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
taosTFree(pSql);
return pRes->code;
tfree(pSql);
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if (pSql->sqlstr == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscError("%p failed to malloc sql string buffer", pSql);
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
taosTFree(pSql);
return pRes->code;
tfree(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strtolower(pSql->sqlstr, sql);

View File

@ -273,7 +273,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetNextLaunchTimer(pStream, pSql);
@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) {
pStream->pSql = NULL;
taos_free_result(pSql);
taosTFree(pStream);
tfree(pStream);
}
}

File diff suppressed because it is too large

View File

@ -36,6 +36,7 @@ void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
int tsInsertHeadSize;
int tscRefId = -1;
int tscNumOfThreads;
@ -79,7 +80,7 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon
void taos_init_imp(void) {
char temp[128];
char temp[128] = {0};
errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec());
@ -103,8 +104,8 @@ void taos_init_imp(void) {
taosReadGlobalCfg();
taosCheckGlobalCfg();
taosPrintGlobalCfg();
rpcInit();
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
@ -146,29 +147,45 @@ void taos_init_imp(void) {
tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj");
}
tscRefId = taosOpenRef(200, tscCloseTscObj);
// in other language APIs, taos_cleanup is not available yet.
// So, to make sure taos_cleanup will be invoked to clean up the allocated
// resource to suppress the valgrind warning.
atexit(taos_cleanup);
tscDebug("client is initialized successfully");
}
void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup() {
if (tscMetaCache != NULL) {
taosCacheCleanup(tscMetaCache);
tscMetaCache = NULL;
// this function may be called by user or system, or by both simultaneously.
void taos_cleanup(void) {
tscDebug("start to cleanup client environment");
taosCacheCleanup(tscObjCache);
tscObjCache = NULL;
void* m = tscMetaCache;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) {
taosCacheCleanup(m);
}
if (tscQhandle != NULL) {
taosCleanUpScheduler(tscQhandle);
tscQhandle = NULL;
m = tscObjCache;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscObjCache, m, 0) == m) {
taosCacheCleanup(m);
}
m = tscQhandle;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) {
taosCleanUpScheduler(m);
}
taosCloseRef(tscRefId);
taosCleanupKeywordsTable();
taosCloseLog();
if (tscEmbedded == 0) rpcCleanup();
taosTmrCleanUp(tscTmr);
m = tscTmr;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
taosTmrCleanUp(m);
}
}
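
The rewritten taos_cleanup() can be reached both from atexit() and from an explicit user call, so each global is detached with an atomic compare-and-swap before it is torn down, which guarantees a single teardown per resource. A standalone sketch of that idiom using C11 atomics (the TDengine macro atomic_val_compare_exchange_ptr is platform-specific; this is not the project's code):

#include <stdatomic.h>
#include <stdlib.h>

// "Detach then destroy": whoever swaps the pointer to NULL first owns the teardown;
// later or concurrent callers see NULL (or lose the CAS) and do nothing.
static _Atomic(void *) g_cache = NULL;

static void destroy_cache(void *cache) { free(cache); }   // placeholder destructor

void cleanup_once(void) {
  void *expected = atomic_load(&g_cache);
  if (expected != NULL &&
      atomic_compare_exchange_strong(&g_cache, &expected, NULL)) {
    destroy_cache(expected);   // only one caller reaches this line
  }
}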
static int taos_options_imp(TSDB_OPTION option, const char *pStr) {

View File

@ -71,7 +71,8 @@ void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw) {
}
bool tscQueryTags(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t functId = pExpr->functionId;
@ -201,13 +202,9 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
assert(pExpr != NULL);
// if (pExpr == NULL) {
// return false;
// }
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TAG) {
@ -222,6 +219,22 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) {
if (tscIsProjectionQuery(pQueryInfo)) {
return false;
}
size_t numOfOutput = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo;
if (pExprInfo != NULL) {
return true;
}
}
return false;
}
bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
@ -245,21 +258,25 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo) {
}
pQueryInfo->fillType = TSDB_FILL_NONE;
taosTFree(pQueryInfo->fillVal);
tfree(pQueryInfo->fillVal);
}
int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
if (pRes->tsrow == NULL) {
int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput;
pRes->numOfCols = numOfOutput;
pRes->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
pRes->tsrow = calloc(numOfOutput, POINTER_BYTES);
pRes->length = calloc(numOfOutput, sizeof(int32_t));
pRes->buffer = calloc(numOfOutput, POINTER_BYTES);
pRes->tsrow = calloc(pRes->numOfCols, POINTER_BYTES);
pRes->urow = calloc(pRes->numOfCols, POINTER_BYTES);
pRes->length = calloc(pRes->numOfCols, sizeof(int32_t));
pRes->buffer = calloc(pRes->numOfCols, POINTER_BYTES);
// not enough memory
if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
taosTFree(pRes->tsrow);
if (pRes->tsrow == NULL || pRes->urow == NULL || pRes->length == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
tfree(pRes->tsrow);
tfree(pRes->urow);
tfree(pRes->length);
tfree(pRes->buffer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
@ -268,27 +285,93 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
assert(pRes->numOfCols > 0);
int32_t offset = 0;
for (int32_t i = 0; i < pRes->numOfCols; ++i) {
SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
pRes->urow[i] = pRes->data + offset * pRes->numOfRows;
pRes->length[i] = pInfo->field.bytes;
offset += pInfo->field.bytes;
// generated the user-defined column result
if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) {
setNullN(pRes->urow[i], pInfo->field.type, pInfo->field.bytes, (int32_t) pRes->numOfRows);
} else {
if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR || pInfo->field.type == TSDB_DATA_TYPE_BINARY) {
assert(pInfo->pSqlExpr->param[1].nLen <= pInfo->field.bytes);
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
memcpy(varDataVal(p), pInfo->pSqlExpr->param[1].pz, pInfo->pSqlExpr->param[1].nLen);
varDataSetLen(p, pInfo->pSqlExpr->param[1].nLen);
}
} else {
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* p = ((char**)pRes->urow)[i] + k * pInfo->field.bytes;
memcpy(p, &pInfo->pSqlExpr->param[1].i64Key, pInfo->field.bytes);
}
}
}
} else if (pInfo->field.type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
// string terminated char for binary data
memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows);
char* p = pRes->urow[i];
for (int32_t k = 0; k < pRes->numOfRows; ++k) {
char* dst = pRes->buffer[i] + k * pInfo->field.bytes;
if (isNull(p, TSDB_DATA_TYPE_NCHAR)) {
memcpy(dst, p, varDataTLen(p));
} else {
int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst));
varDataSetLen(dst, length);
if (length == 0) {
tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p);
}
}
p += pInfo->field.bytes;
}
memcpy(pRes->urow[i], pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows);
}
}
}
static void tscDestroyResPointerInfo(SSqlRes* pRes) {
if (pRes->buffer != NULL) { // free all buffers containing the multibyte string
for (int i = 0; i < pRes->numOfCols; i++) {
taosTFree(pRes->buffer[i]);
tfree(pRes->buffer[i]);
}
pRes->numOfCols = 0;
}
taosTFree(pRes->pRsp);
tfree(pRes->pRsp);
taosTFree(pRes->tsrow);
taosTFree(pRes->length);
taosTFree(pRes->buffer);
tfree(pRes->tsrow);
tfree(pRes->length);
tfree(pRes->buffer);
tfree(pRes->urow);
taosTFree(pRes->pGroupRec);
taosTFree(pRes->pColumnIndex);
tfree(pRes->pGroupRec);
tfree(pRes->pColumnIndex);
if (pRes->pArithSup != NULL) {
taosTFree(pRes->pArithSup->data);
taosTFree(pRes->pArithSup);
tfree(pRes->pArithSup->data);
tfree(pRes->pArithSup);
}
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
@ -305,11 +388,11 @@ static void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) {
freeQueryInfoImpl(pQueryInfo);
clearAllTableMetaInfo(pQueryInfo, (const char*)addr, removeFromCache);
taosTFree(pQueryInfo);
tfree(pQueryInfo);
}
pCmd->numOfClause = 0;
taosTFree(pCmd->pQueryInfo);
tfree(pCmd->pQueryInfo);
}
void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) {
@ -338,34 +421,6 @@ void tscFreeSqlResult(SSqlObj* pSql) {
memset(&pSql->res, 0, sizeof(SSqlRes));
}
void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
}
SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscRemoveFromSqlList(pSql);
}
// pSql->sqlstr will be used by tscBuildQueryStreamDesc
// if (pObj->signature == pObj) {
//pthread_mutex_lock(&pObj->mutex);
taosTFree(pSql->sqlstr);
//pthread_mutex_unlock(&pObj->mutex);
// }
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pSql->self = 0;
tscResetSqlCmdObj(pCmd, false);
}
static void tscFreeSubobj(SSqlObj* pSql) {
if (pSql->subState.numOfSub == 0) {
return;
@ -404,7 +459,7 @@ void tscFreeRegisteredSqlObj(void *pSql) {
tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", *p, pTscObj, ref);
if (ref == 0) {
tscDebug("%p all sqlObj freed, free tscObj:%p", *p, pTscObj);
tscCloseTscObj(pTscObj);
taosRemoveRef(tscRefId, pTscObj->rid);
}
}
@ -415,14 +470,14 @@ void tscFreeTableMetaHelper(void *pTableMeta) {
assert(numOfEps >= 0 && numOfEps <= TSDB_MAX_REPLICA);
for(int32_t i = 0; i < numOfEps; ++i) {
taosTFree(p->vgroupInfo.epAddr[i].fqdn);
tfree(p->vgroupInfo.epAddr[i].fqdn);
}
int32_t numOfEps1 = p->corVgroupInfo.numOfEps;
assert(numOfEps1 >= 0 && numOfEps1 <= TSDB_MAX_REPLICA);
for(int32_t i = 0; i < numOfEps1; ++i) {
taosTFree(p->corVgroupInfo.epAddr[i].fqdn);
tfree(p->corVgroupInfo.epAddr[i].fqdn);
}
}
@ -434,22 +489,32 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tscDebug("%p start to free sqlObj", pSql);
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
tscFreeSubobj(pSql);
tscPartiallyFreeSqlObj(pSql);
SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscRemoveFromSqlList(pSql);
}
pSql->signature = NULL;
pSql->fp = NULL;
tfree(pSql->sqlstr);
SSqlCmd* pCmd = &pSql->cmd;
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pSql->self = 0;
tscFreeSqlResult(pSql);
tscResetSqlCmdObj(pCmd, false);
memset(pCmd->payload, 0, (size_t)pCmd->allocSize);
taosTFree(pCmd->payload);
tfree(pCmd->payload);
pCmd->allocSize = 0;
taosTFree(pSql->sqlstr);
tsem_destroy(&pSql->rspSem);
free(pSql);
}
@ -458,15 +523,15 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
return;
}
taosTFree(pDataBlock->pData);
taosTFree(pDataBlock->params);
tfree(pDataBlock->pData);
tfree(pDataBlock->params);
// free the refcount for metermeta
if (pDataBlock->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false);
}
taosTFree(pDataBlock);
tfree(pDataBlock);
}
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
@ -741,7 +806,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
taosHashCleanup(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
taosTFree(dataBuf->pData);
tfree(dataBuf->pData);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -786,8 +851,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
}
// TODO: all subqueries should be freed correctly before close this connection.
void tscCloseTscObj(STscObj* pObj) {
assert(pObj != NULL);
void tscCloseTscObj(void *param) {
STscObj *pObj = param;
pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
@ -801,7 +866,7 @@ void tscCloseTscObj(STscObj* pObj) {
pthread_mutex_destroy(&pObj->mutex);
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, p);
taosTFree(pObj);
tfree(pObj);
}
bool tscIsInsertData(char* sqlstr) {
@ -885,23 +950,6 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {
}
}
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) {
if (tscSqlExprNumOfExprs(pQueryInfo) == 0) {
return;
}
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->offset = 0;
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 1; i < numOfExprs; ++i) {
SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1);
SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i);
p->offset = prev->offset + prev->resBytes;
}
}
SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index) {
assert(index < pFieldInfo->numOfOutput);
return TARRAY_GET_ELEM(pFieldInfo->internalField, index);
@ -965,18 +1013,26 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
if (pInfo->pArithExprInfo != NULL) {
tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);
taosTFree(pInfo->pArithExprInfo);
SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) {
tfree(pFuncMsg->arg[j].argValue.pz);
}
}
tfree(pInfo->pArithExprInfo);
}
}
taosArrayDestroy(pFieldInfo->internalField);
taosTFree(pFieldInfo->final);
tfree(pFieldInfo->final);
memset(pFieldInfo, 0, sizeof(SFieldInfo));
}
static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, int32_t colType) {
int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);
SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr));
@ -1009,6 +1065,7 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
pExpr->resType = type;
pExpr->resBytes = size;
pExpr->resColId = resColId;
pExpr->interBytes = interSize;
if (pTableMetaInfo->pTableMeta) {
@ -1019,20 +1076,20 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
}
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol) {
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList);
if (index == num) {
return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
return pExpr;
}
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol) {
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayPush(pQueryInfo->exprList, &pExpr);
return pExpr;
}
@ -1060,16 +1117,14 @@ size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex) {
if (pExpr == NULL || argument == NULL || bytes == 0) {
return;
}
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0);
// set parameter value
// transfer to tVariant from byte data/no ascii data
tVariantCreateFromBinary(&pExpr->param[pExpr->numOfParams], argument, bytes, type);
pExpr->numOfParams += 1;
assert(pExpr->numOfParams <= 3);
}
@ -1086,7 +1141,7 @@ void* sqlExprDestroy(SSqlExpr* pExpr) {
tVariantDestroy(&pExpr->param[i]);
}
taosTFree(pExpr);
tfree(pExpr);
return NULL;
}
@ -1121,6 +1176,8 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco
}
*p1 = *pExpr;
memset(p1->param, 0, sizeof(tVariant) * tListLen(p1->param));
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
tVariantAssign(&p1->param[j], &pExpr->param[j]);
}
@ -1184,11 +1241,11 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
taosTFree(pFilterInfo[i].pz);
tfree(pFilterInfo[i].pz);
}
}
taosTFree(pFilterInfo);
tfree(pFilterInfo);
}
SColumn* tscColumnClone(const SColumn* src) {
@ -1254,8 +1311,7 @@ void tscColumnListDestroy(SArray* pColumnList) {
*
*/
static int32_t validateQuoteToken(SStrToken* pToken) {
strdequote(pToken->z);
pToken->n = (uint32_t)strtrim(pToken->z);
tscDequoteAndTrimToken(pToken);
int32_t k = tSQLGetToken(pToken->z, &pToken->type);
@ -1270,8 +1326,6 @@ static int32_t validateQuoteToken(SStrToken* pToken) {
}
void tscDequoteAndTrimToken(SStrToken* pToken) {
assert(pToken->type == TK_STRING);
uint32_t first = 0, last = pToken->n;
// trim leading spaces
@ -1383,7 +1437,8 @@ int32_t tscValidateName(SStrToken* pToken) {
} else {
pStr[firstPartLen] = TS_PATH_DELIMITER[0];
memmove(&pStr[firstPartLen + 1], pToken->z, pToken->n);
pStr[firstPartLen + sizeof(TS_PATH_DELIMITER[0]) + pToken->n] = 0;
uint32_t offset = (uint32_t)(pToken->z - (pStr + firstPartLen + 1));
memset(pToken->z + pToken->n - offset, ' ', offset);
}
pToken->n += (firstPartLen + sizeof(TS_PATH_DELIMITER[0]));
pToken->z = pStr;
@ -1476,7 +1531,7 @@ void tscTagCondRelease(STagCond* pTagCond) {
size_t s = taosArrayGetSize(pTagCond->pCond);
for (int32_t i = 0; i < s; ++i) {
SCond* p = taosArrayGet(pTagCond->pCond, i);
taosTFree(p->cond);
tfree(p->cond);
}
taosArrayDestroy(pTagCond->pCond);
@ -1622,6 +1677,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
pQueryInfo->resColumnId= -1000;
}
int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
@ -1663,11 +1719,12 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) {
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
taosArrayDestroy(pQueryInfo->groupbyExpr.columnInfo);
pQueryInfo->groupbyExpr.columnInfo = NULL;
pQueryInfo->groupbyExpr.numOfGroupCols = 0;
}
pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf);
taosTFree(pQueryInfo->fillVal);
tfree(pQueryInfo->fillVal);
}
void tscClearSubqueryInfo(SSqlCmd* pCmd) {
@ -1678,23 +1735,71 @@ void tscClearSubqueryInfo(SSqlCmd* pCmd) {
}
void tscFreeVgroupTableInfo(SArray* pVgroupTables) {
if (pVgroupTables != NULL) {
size_t num = taosArrayGetSize(pVgroupTables);
for (size_t i = 0; i < num; i++) {
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i);
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
taosTFree(pInfo->vgInfo.epAddr[j].fqdn);
}
taosArrayDestroy(pInfo->itemList);
}
taosArrayDestroy(pVgroupTables);
if (pVgroupTables == NULL) {
return;
}
size_t num = taosArrayGetSize(pVgroupTables);
for (size_t i = 0; i < num; i++) {
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i);
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
tfree(pInfo->vgInfo.epAddr[j].fqdn);
}
taosArrayDestroy(pInfo->itemList);
}
taosArrayDestroy(pVgroupTables);
}
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) {
assert(pVgroupTable != NULL && index >= 0);
size_t size = taosArrayGetSize(pVgroupTable);
assert(size > index);
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index);
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
tfree(pInfo->vgInfo.epAddr[j].fqdn);
}
taosArrayDestroy(pInfo->itemList);
taosArrayRemove(pVgroupTable, index);
}
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
memset(info, 0, sizeof(SVgroupTableInfo));
info->vgInfo = pInfo->vgInfo;
for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
info->itemList = taosArrayClone(pInfo->itemList);
}
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
if (pVgroupTables == NULL) {
return NULL;
}
size_t num = taosArrayGetSize(pVgroupTables);
SArray* pa = taosArrayInit(num, sizeof(SVgroupTableInfo));
SVgroupTableInfo info;
for (size_t i = 0; i < num; i++) {
SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i);
tscVgroupTableCopy(&info, pInfo);
taosArrayPush(pa, &info);
}
return pa;
}
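A reduced model of the clone/free pairing used by tscVgroupTableCopy and tscRemoveVgroupTableGroup above: every endpoint name duplicated during the copy must be released again when the group is dropped. The types and helpers below are simplified stand-ins, not the real SVgroupTableInfo API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_EPS 3

typedef struct {
  int   numOfEps;
  char *fqdn[MAX_EPS];
} DemoVgroup;

static void demoVgroupCopy(DemoVgroup *dst, const DemoVgroup *src) {
  dst->numOfEps = src->numOfEps;
  for (int j = 0; j < src->numOfEps; ++j) {
    dst->fqdn[j] = strdup(src->fqdn[j]);   // each endpoint name gets its own copy
  }
}

static void demoVgroupClear(DemoVgroup *vg) {
  for (int j = 0; j < vg->numOfEps; ++j) {
    free(vg->fqdn[j]);                     // matches the strdup done in the copy
    vg->fqdn[j] = NULL;
  }
}

int main(void) {
  DemoVgroup src = {2, {"node1.example.com", "node2.example.com", NULL}};
  DemoVgroup dst;
  demoVgroupCopy(&dst, &src);
  printf("%s %s\n", dst.fqdn[0], dst.fqdn[1]);
  demoVgroupClear(&dst);                   // only the cloned strings are freed
  return 0;
}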
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) {
tscDebug("%p deref the table meta in cache, numOfTables:%d", address, pQueryInfo->numOfTables);
tscDebug("%p unref %d tables in the tableMeta cache", address, pQueryInfo->numOfTables);
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
@ -1704,11 +1809,11 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool rem
free(pTableMetaInfo);
}
taosTFree(pQueryInfo->pTableMetaInfo);
tfree(pQueryInfo->pTableMetaInfo);
}
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols) {
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1734,6 +1839,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList);
}
// TODO handle malloc failure
pTableMetaInfo->tagColList = taosArrayInit(4, POINTER_BYTES);
if (pTableMetaInfo->tagColList == NULL) {
return NULL;
@ -1743,12 +1849,14 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
}
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo* pQueryInfo) {
return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL);
return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL, NULL);
}
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache) {
@ -1822,7 +1930,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
@ -1987,14 +2095,16 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
if (pFinalInfo->pTableMeta == NULL) {
@ -2106,6 +2216,21 @@ int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) {
}
}
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) {
int32_t numOfTags = tscGetNumOfTags(pTableMeta);
SSchema* pSchema = tscGetTableTagSchema(pTableMeta);
for(int32_t i = 0; i < numOfTags; ++i) {
if (pSchema[i].colId == colId) {
return i;
}
}
// cannot reach here
assert(0);
return INT16_MIN;
}
bool tscIsUpdateQuery(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
@ -2283,7 +2408,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
pRes->numOfTotal = num;
taosTFree(pSql->pSubs);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
pSql->fp = fp;
@ -2375,7 +2500,7 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) {
return NULL;
}
size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups;
size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups;
SVgroupsInfo* pNew = calloc(1, size);
if (pNew == NULL) {
return NULL;
@ -2384,9 +2509,9 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) {
pNew->numOfVgroups = vgroupList->numOfVgroups;
for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) {
SCMVgroupInfo* pNewVInfo = &pNew->vgroups[i];
SVgroupInfo* pNewVInfo = &pNew->vgroups[i];
SCMVgroupInfo* pvInfo = &vgroupList->vgroups[i];
SVgroupInfo* pvInfo = &vgroupList->vgroups[i];
pNewVInfo->vgId = pvInfo->vgId;
pNewVInfo->numOfEps = pvInfo->numOfEps;
@ -2405,21 +2530,22 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) {
}
for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) {
SCMVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i];
SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i];
for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) {
taosTFree(pVgroupInfo->epAddr[j].fqdn);
tfree(pVgroupInfo->epAddr[j].fqdn);
}
}
taosTFree(vgroupList);
tfree(vgroupList);
return NULL;
}
void tscSCMVgroupInfoCopy(SCMVgroupInfo* dst, const SCMVgroupInfo* src) {
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) {
dst->vgId = src->vgId;
dst->numOfEps = src->numOfEps;
for(int32_t i = 0; i < dst->numOfEps; ++i) {
tfree(dst->epAddr[i].fqdn);
dst->epAddr[i].port = src->epAddr[i].port;
dst->epAddr[i].fqdn = strdup(src->epAddr[i].fqdn);
}


@ -80,7 +80,7 @@ typedef struct {
#define schemaFLen(s) ((s)->flen)
#define schemaVLen(s) ((s)->vlen)
#define schemaColAt(s, i) ((s)->columns + i)
#define tdFreeSchema(s) taosTFree((s))
#define tdFreeSchema(s) tfree((s))
STSchema *tdDupSchema(STSchema *pSchema);
int tdEncodeSchema(void **buf, STSchema *pSchema);
@ -119,6 +119,33 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Semantic timestamp key definition
typedef uint64_t TKEY;
#define TKEY_INVALID UINT64_MAX
#define TKEY_NULL TKEY_INVALID
#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
#define TKEY_DELETE_FLAG (((TKEY)1) << 62)
#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG | TKEY_DELETE_FLAG))
#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0)
#define TKEY_IS_DELETED(tkey) (((tkey)&TKEY_DELETE_FLAG) != 0)
#define tdSetTKEYDeleted(tkey) ((tkey) | TKEY_DELETE_FLAG)
#define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key)))
#define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1))
static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
TSKEY key1 = tdGetKey(*(TKEY *)tkey1);
TSKEY key2 = tdGetKey(*(TKEY *)tkey2);
if (key1 < key2) {
return -1;
} else if (key1 > key2) {
return 1;
} else {
return 0;
}
}
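A minimal standalone sketch of the TKEY round trip, re-declaring the macros above so it compiles on its own (TSKEY is assumed to be int64_t, as elsewhere in the code base; nothing here is new API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t  TSKEY;
typedef uint64_t TKEY;

#define ABS(x) ((x) >= 0 ? (x) : -(x))
#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
#define TKEY_DELETE_FLAG   (((TKEY)1) << 62)
#define TKEY_VALUE_FILTER  (~(TKEY_NEGATIVE_FLAG | TKEY_DELETE_FLAG))
#define TKEY_IS_NEGATIVE(tkey) (((tkey) & TKEY_NEGATIVE_FLAG) != 0)
#define TKEY_IS_DELETED(tkey)  (((tkey) & TKEY_DELETE_FLAG) != 0)
#define tdSetTKEYDeleted(tkey) ((tkey) | TKEY_DELETE_FLAG)
#define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key)))
#define tdGetKey(tkey) (((TSKEY)((tkey) & TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1))

int main(void) {
  TSKEY key  = -1500000000000;            // a timestamp before the epoch
  TKEY  tkey = tdGetTKEY(key);            // magnitude in the low 62 bits, sign in bit 63
  assert(tdGetKey(tkey) == key);          // the round trip preserves the original value

  TKEY deleted = tdSetTKEYDeleted(tkey);  // bit 62 marks the row as deleted
  assert(TKEY_IS_DELETED(deleted));
  assert(tdGetKey(deleted) == key);       // the key itself is still recoverable

  printf("key=%lld tkey=%llu\n", (long long)key, (unsigned long long)tkey);
  return 0;
}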
// ----------------- Data row structure
/* A data row, the format is like below:
@ -129,6 +156,8 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*
* NOTE: timestamp in this row structure is TKEY instead of TSKEY
*/
typedef void *SDataRow;
@ -137,11 +166,13 @@ typedef void *SDataRow;
#define dataRowLen(r) (*(uint16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
#define dataRowKey(r) tdGetKey(dataRowTKey(r))
#define dataRowSetLen(r, l) (dataRowLen(r) = (l))
#define dataRowSetVersion(r, v) (dataRowVersion(r) = (v))
#define dataRowCpy(dst, r) memcpy((dst), (r), dataRowLen(r))
#define dataRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_DATA_ROW_HEAD_SIZE)
#define dataRowDeleted(r) TKEY_IS_DELETED(dataRowTKey(r))
SDataRow tdNewDataRowFromSchema(STSchema *pSchema);
void tdFreeDataRow(SDataRow row);
@ -154,16 +185,18 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(ptr, value, varDataTLen(value));
dataRowLen(row) += varDataTLen(value);
break;
default:
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(ptr, value, varDataTLen(value));
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]);
} else {
memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
break;
}
}
return 0;
@ -171,12 +204,10 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
switch (type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset));
default:
return POINTER_SHIFT(row, offset);
if (IS_VAR_DATA_TYPE(type)) {
return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset));
} else {
return POINTER_SHIFT(row, offset);
}
}
@ -196,7 +227,6 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints);
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
@ -204,28 +234,20 @@ void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
// Get the data pointer from a column-wised data
static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) {
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
break;
default:
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
break;
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
}
}
static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) {
ASSERT(rows > 0);
switch (pDataCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1));
break;
default:
return TYPE_BYTES[pDataCol->type] * rows;
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1));
} else {
return TYPE_BYTES[pDataCol->type] * rows;
}
}
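The two addressing schemes can be illustrated with a simplified stand-in for SDataCol; this is not the real layout, only the idea that variable-length columns are reached through a per-row offset array while fixed-width columns use a constant stride:

#include <stdint.h>
#include <stdio.h>

typedef struct {
  int      isVarType;   // mimics IS_VAR_DATA_TYPE(type)
  int      typeBytes;   // mimics TYPE_BYTES[type] for fixed-width columns
  int32_t *dataOff;     // per-row offsets, used only for var-length columns
  char    *pData;       // raw column payload
} DemoCol;

static void *demoGetColDataOfRow(DemoCol *col, int row) {
  if (col->isVarType) {
    return col->pData + col->dataOff[row];    // indirect: offset array -> payload
  } else {
    return col->pData + col->typeBytes * row; // direct: fixed stride
  }
}

int main(void) {
  // fixed-width column of int32 values
  int32_t fixedPayload[3] = {7, 8, 9};
  DemoCol fixedCol = {0, sizeof(int32_t), NULL, (char *)fixedPayload};
  printf("%d\n", *(int32_t *)demoGetColDataOfRow(&fixedCol, 2)); // prints 9

  // variable-length column: two strings packed back to back
  char    varPayload[] = "hi\0world";          // "hi" at offset 0, "world" at offset 3
  int32_t offsets[2]   = {0, 3};
  DemoCol varCol = {1, 0, offsets, varPayload};
  printf("%s\n", (char *)demoGetColDataOfRow(&varCol, 1));       // prints world
  return 0;
}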
@ -243,9 +265,14 @@ typedef struct {
} SDataCols;
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
#define dataColsKeyAt(pCols, idx) ((TSKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsKeyFirst(pCols) dataColsKeyAt(pCols, 0)
#define dataColsKeyLast(pCols) ((pCols->numOfRows == 0) ? 0 : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
#define dataColsTKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, 0))
#define dataColsKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, 0))
#define dataColsTKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, (pCols)->numOfRows - 1))
#define dataColsKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
@ -253,10 +280,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
void tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop); //!!!!
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows);
// ----------------- K-V data row structure
/*
@ -284,7 +308,7 @@ typedef struct {
#define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r))
#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) taosTFree(r)
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
SKVRow tdKVRowDup(SKVRow row);


@ -44,13 +44,18 @@ extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
extern uint32_t tsMaxTmrCtrl;
extern float tsNumOfThreadsPerCore;
extern float tsRatioOfQueryThreads;
extern int32_t tsNumOfCommitThreads;
extern float tsRatioOfQueryThreads; // todo remove it
extern int8_t tsDaylight;
extern char tsTimezone[];
extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern char tsCharset[]; // default encode string
extern int32_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern char tsTempDir[];
//query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing
// client
extern int32_t tsTableMetaKeepTimer;
@ -84,6 +89,7 @@ extern int16_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;
extern int32_t tsUpdate;
// balance
extern int32_t tsEnableBalance;
@ -180,12 +186,12 @@ extern int32_t debugFlag;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
void taosInitGlobalCfg();
bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
void taosInitGlobalCfg();
int32_t taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
#ifdef __cplusplus
}


@ -35,6 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
SSchema tscGetTbnameColumnSchema();
#endif // TDENGINE_NAME_H


@ -18,6 +18,9 @@
#include "tcoding.h"
#include "wchar.h"
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows);
/**
* Duplicate the schema and return a new object
*/
@ -94,7 +97,7 @@ int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) {
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder) {
if (pBuilder) {
taosTFree(pBuilder->columns);
tfree(pBuilder->columns);
}
}
@ -202,7 +205,7 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE;
pDataCol->len = 0;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
pDataCol->spaceSize = pDataCol->bytes * maxPoints;
@ -215,60 +218,29 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
}
}
// the value passed in for the timestamp column must already be a TKEY, not a TSKEY
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
break;
default:
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
break;
}
}
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows) {
int pointsLeft = numOfRows - pointsToPop;
ASSERT(pointsLeft > 0);
if (pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR) {
ASSERT(pCol->len > 0);
VarDataOffsetT toffset = pCol->dataOff[pointsToPop];
pCol->len = pCol->len - toffset;
ASSERT(pCol->len > 0);
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, toffset), pCol->len);
dataColSetOffset(pCol, pointsLeft);
if (IS_VAR_DATA_TYPE(pCol->type)) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
} else {
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
pCol->len = TYPE_BYTES[pCol->type] * pointsLeft;
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * pointsToPop), pCol->len);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
}
}
bool isNEleNull(SDataCol *pCol, int nEle) {
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
default:
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
}
void dataColSetNullAt(SDataCol *pCol, int index) {
@ -367,8 +339,8 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
void tdFreeDataCols(SDataCols *pCols) {
if (pCols) {
taosTFree(pCols->buf);
taosTFree(pCols->cols);
tfree(pCols->buf);
tfree(pCols->cols);
free(pCols);
}
}
@ -390,7 +362,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize;
pRet->cols[i].pData = (void *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf)));
if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
ASSERT(pDataCols->cols[i].dataOff != NULL);
pRet->cols[i].dataOff =
(int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf)));
@ -400,7 +372,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].len = pDataCols->cols[i].len;
if (pDataCols->cols[i].len > 0) {
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints);
}
}
@ -420,58 +392,54 @@ void tdResetDataCols(SDataCols *pCols) {
}
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
ASSERT(dataColsKeyLast(pCols) < dataRowKey(row));
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
int dcol = 0;
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
continue;
if (dataRowDeleted(row)) {
for (; dcol < pCols->numOfCols; dcol++) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (dcol == 0) {
dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints);
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
}
}
} else {
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
continue;
}
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset+TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
}
}
}
pCols->numOfRows++;
}
// Pop pointsToPop points from the SDataCols
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop) {
int pointsLeft = pCols->numOfRows - pointsToPop;
if (pointsLeft <= 0) {
tdResetDataCols(pCols);
return;
}
for (int iCol = 0; iCol < pCols->numOfCols; iCol++) {
SDataCol *pCol = pCols->cols + iCol;
dataColPopPoints(pCol, pointsToPop, pCols->numOfRows);
}
pCols->numOfRows = pointsLeft;
}
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
ASSERT(target->numOfCols == source->numOfCols);
SDataCols *pTarget = NULL;
if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
if (source->cols[j].len > 0) {
@ -499,17 +467,23 @@ _err:
return -1;
}
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows) {
// rows from src2 take priority over rows from src1
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
while (target->numOfRows < tRows) {
if (*iter1 >= limit1 && *iter2 >= limit2) break;
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1);
TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1);
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2);
TKEY tkey2 = (*iter2 >= limit2) ? TKEY_NULL : dataColsTKeyAt(src2, *iter2);
if (key1 <= key2) {
ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1)));
if (key1 < key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
if (src1->cols[i].len > 0) {
@ -520,19 +494,23 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limi
target->numOfRows++;
(*iter1)++;
if (key1 == key2) (*iter2)++;
} else {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
} else if (key1 >= key2) {
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
}
}
target->numOfRows++;
}
target->numOfRows++;
(*iter2)++;
if (key1 == key2) (*iter1)++;
}
ASSERT(target->numOfRows <= target->maxPoints);
}
}
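A toy model of the merge precedence: on equal keys the row from src2 wins unless src2 flags it as deleted, in which case both iterators advance and nothing is emitted. The row type and helper below are illustrative stand-ins, not the real SDataCols API.

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t key; int deleted; } DemoRow;

static int mergeRows(const DemoRow *a, int na, const DemoRow *b, int nb,
                     int64_t *out, int maxOut) {
  int i = 0, j = 0, n = 0;
  while ((i < na || j < nb) && n < maxOut) {
    int64_t k1 = (i < na) ? a[i].key : INT64_MAX;
    int64_t k2 = (j < nb) ? b[j].key : INT64_MAX;
    if (k1 < k2) {
      out[n++] = a[i++].key;            // src1 is assumed to hold no deleted rows, as the original asserts
    } else {
      if (k1 > k2 || !b[j].deleted) {   // src2 wins unless it is a tombstone shadowing src1
        out[n++] = b[j].key;
      }
      j++;
      if (k1 == k2) i++;                // consume the shadowed src1 row as well
    }
  }
  return n;
}

int main(void) {
  DemoRow src1[] = {{1, 0}, {3, 0}, {5, 0}};
  DemoRow src2[] = {{3, 0}, {5, 1}};    // key 3 overrides src1, key 5 is deleted
  int64_t out[8];
  int n = mergeRows(src1, 3, src2, 2, out, 8);
  for (int i = 0; i < n; i++) printf("%lld ", (long long)out[i]);  // prints: 1 3
  printf("\n");
  return 0;
}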
@ -691,8 +669,8 @@ int tdInitKVRowBuilder(SKVRowBuilder *pBuilder) {
}
void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder) {
taosTFree(pBuilder->pColIdx);
taosTFree(pBuilder->buf);
tfree(pBuilder->pColIdx);
tfree(pBuilder->buf);
}
void tdResetKVRowBuilder(SKVRowBuilder *pBuilder) {

View File

@ -45,19 +45,21 @@ int32_t tsEnableTelemetryReporting = 1;
char tsEmail[TSDB_FQDN_LEN] = {0};
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 5000;
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 5000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0;
float tsRatioOfQueryThreads = 0.5;
int8_t tsDaylight = 0;
int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 1;
float tsRatioOfQueryThreads = 0.5f;
int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
int32_t tsEnableCoreFile = 0;
int32_t tsMaxBinaryDisplayWidth = 30;
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -99,6 +101,12 @@ float tsStreamComputDelayRatio = 0.1f;
int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once
int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
// 0 no query allowed, queries are disabled
// positive value (in MB)
int32_t tsQueryBufferSize = -1;
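A caller could consume the three documented settings roughly as follows; the helper is hypothetical and only illustrates the -1 / 0 / positive-MB semantics stated in the comment above:

#include <stdint.h>
#include <stdio.h>

static int32_t tsQueryBufferSize = -1;   // -1 unlimited, 0 queries disabled, >0 budget in MB

// Returns the per-node query budget in bytes, -1 for "unlimited", 0 for "disabled".
static int64_t queryBudgetInBytes(void) {
  if (tsQueryBufferSize < 0) return -1;
  if (tsQueryBufferSize == 0) return 0;
  return (int64_t)tsQueryBufferSize * 1024 * 1024;
}

int main(void) {
  tsQueryBufferSize = 512;               // e.g. configured to 512 MB
  printf("%lld bytes\n", (long long)queryBudgetInBytes());
  return 0;
}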
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
@ -113,6 +121,7 @@ int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
int32_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
@ -210,6 +219,8 @@ int32_t (*monitorStartSystemFp)() = NULL;
void (*monitorStopSystemFp)() = NULL;
void (*monitorExecuteSQLFp)(char *sql) = NULL;
char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"};
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
void taosSetAllDebugFlag() {
@ -416,6 +427,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfCommitThreads";
cfg.ptr = &tsNumOfCommitThreads;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 1;
cfg.maxValue = 100;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "ratioOfQueryThreads";
cfg.ptr = &tsRatioOfQueryThreads;
cfg.valType = TAOS_CFG_VTYPE_FLOAT;
@ -676,7 +697,7 @@ static void doInitGlobalConfig(void) {
cfg.minValue = TSDB_MIN_CACHE_BLOCK_SIZE;
cfg.maxValue = TSDB_MAX_CACHE_BLOCK_SIZE;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_Mb;
cfg.unitType = TAOS_CFG_UTYPE_MB;
taosInitConfigOption(cfg);
cfg.option = "blocks";
@ -779,6 +800,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "update";
cfg.ptr = &tsUpdate;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_UPDATE;
cfg.maxValue = TSDB_MAX_DB_UPDATE;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
@ -839,6 +870,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "queryBufferSize";
cfg.ptr = &tsQueryBufferSize;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
cfg.maxValue = 500000000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);
// locale & charset
cfg.option = "timezone";
cfg.ptr = tsTimezone;
@ -1294,13 +1335,23 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "tempDir";
cfg.ptr = tsTempDir;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = tListLen(tsTempDir);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
}
void taosInitGlobalCfg() {
pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
}
bool taosCheckGlobalCfg() {
int32_t taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
@ -1359,7 +1410,9 @@ bool taosCheckGlobalCfg() {
tsSyncPort = tsServerPort + TSDB_PORT_SYNC;
tsHttpPort = tsServerPort + TSDB_PORT_HTTP;
return true;
taosPrintGlobalCfg();
return 0;
}
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {


@ -188,3 +188,14 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
pToken->z = r;
}
}
SSchema tscGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}


@ -355,32 +355,6 @@ bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR;
}
//bool isNull(const char *val, int32_t type) {
// switch (type) {
// case TSDB_DATA_TYPE_BOOL:
// return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
// case TSDB_DATA_TYPE_TINYINT:
// return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
// case TSDB_DATA_TYPE_SMALLINT:
// return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
// case TSDB_DATA_TYPE_INT:
// return *(uint32_t *)val == TSDB_DATA_INT_NULL;
// case TSDB_DATA_TYPE_BIGINT:
// case TSDB_DATA_TYPE_TIMESTAMP:
// return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
// case TSDB_DATA_TYPE_FLOAT:
// return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
// case TSDB_DATA_TYPE_DOUBLE:
// return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
// case TSDB_DATA_TYPE_NCHAR:
// return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
// case TSDB_DATA_TYPE_BINARY:
// return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
// default:
// return false;
// };
//}
void setVardataNull(char* val, int32_t type) {
if (type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(val, sizeof(int8_t));
@ -433,14 +407,10 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR: // todo : without length?
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * bytes) = TSDB_DATA_NCHAR_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * bytes) = TSDB_DATA_BINARY_NULL;
setVardataNull(val + i * bytes, type);
}
break;
default: {


@ -108,7 +108,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
break;
}
case TSDB_DATA_TYPE_BINARY: { // todo refactor, extract a method
pVar->pz = calloc(len, sizeof(char));
pVar->pz = calloc(len + 1, sizeof(char));
memcpy(pVar->pz, pz, len);
pVar->nLen = (int32_t)len;
break;
@ -125,7 +125,7 @@ void tVariantDestroy(tVariant *pVar) {
if (pVar == NULL) return;
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
taosTFree(pVar->pz);
tfree(pVar->pz);
pVar->nLen = 0;
}
@ -144,21 +144,24 @@ void tVariantDestroy(tVariant *pVar) {
void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
*pDst = *pSrc;
pDst->nType = pSrc->nType;
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
int32_t len = pSrc->nLen + 1;
if (pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
len = len * TSDB_NCHAR_SIZE;
}
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char* p = realloc(pDst->pz, len);
assert(p);
pDst->pz = calloc(1, len);
memcpy(pDst->pz, pSrc->pz, len);
memset(p, 0, len);
pDst->pz = p;
memcpy(pDst->pz, pSrc->pz, pSrc->nLen);
pDst->nLen = pSrc->nLen;
return;
}
// this is only for string array
if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) {
if (pSrc->nType >= TSDB_DATA_TYPE_BOOL && pSrc->nType <= TSDB_DATA_TYPE_DOUBLE) {
pDst->i64Key = pSrc->i64Key;
} else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { // this is only for string array
size_t num = taosArrayGetSize(pSrc->arr);
pDst->arr = taosArrayInit(num, sizeof(char*));
for(size_t i = 0; i < num; i++) {
@ -166,8 +169,6 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
char* n = strdup(p);
taosArrayPush(pDst->arr, &n);
}
return;
}
pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
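The intent of the reallocation above can be shown with a reduced stand-in for tVariant: after assignment the destination owns its own payload, so freeing the source no longer invalidates the copy. Field names mirror tVariant, but the type and helper here are simplified illustrations.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *pz; int32_t nLen; } DemoVariant;

static void demoVariantAssign(DemoVariant *dst, const DemoVariant *src) {
  char *p = realloc(dst->pz, src->nLen + 1);  // reuse the destination buffer when possible
  assert(p != NULL);
  memset(p, 0, src->nLen + 1);                // keep the copy NUL-terminated
  memcpy(p, src->pz, src->nLen);
  dst->pz = p;
  dst->nLen = src->nLen;
}

int main(void) {
  DemoVariant src = {strdup("tag_value"), 9};
  DemoVariant dst = {NULL, 0};
  demoVariantAssign(&dst, &src);
  free(src.pz);                               // releasing the source is now safe
  assert(strcmp(dst.pz, "tag_value") == 0);   // the destination still holds its own copy
  free(dst.pz);
  return 0;
}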


@ -0,0 +1,154 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType {
TSDB_DATA_TYPE_NULL = 0, // 1 bytes
TSDB_DATA_TYPE_BOOL = 1, // 1 bytes
TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}

@ -1 +1 @@
Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b
Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f

@ -1 +1 @@
Subproject commit d598db167eb256fe67409b7bb3d0eb7fffc3ff8c
Subproject commit ec77d9049a719dabfd1a7c1122a209e201861944

@ -0,0 +1 @@
Subproject commit b62a26ecc164a310104df57691691b237e091c89


@ -5,14 +5,13 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.6</version>
<version>2.0.10</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>


@ -56,6 +56,23 @@
<scope>test</scope>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.58</version>
</dependency>
</dependencies>
<build>
<plugins>


@ -0,0 +1,161 @@
package com.taosdata.jdbc;
import java.io.*;
import java.sql.Driver;
import java.sql.DriverPropertyInfo;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
public abstract class AbstractTaosDriver implements Driver {
private static final String TAOS_CFG_FILENAME = "taos.cfg";
/**
* @param cfgDirPath
* @return the config dir
**/
protected File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return the default config dir, or null if it does not exist
*/
protected File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
protected List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
protected void loadTaosConfig(Properties info) {
if ((info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null ||
info.getProperty(TSDBDriver.PROPERTY_KEY_HOST).isEmpty()) && (
info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null ||
info.getProperty(TSDBDriver.PROPERTY_KEY_PORT).isEmpty())) {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> TAOS_CFG_FILENAME.equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
}
}
protected DriverPropertyInfo[] getPropertyInfo(Properties info) {
DriverPropertyInfo hostProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_HOST, info.getProperty(TSDBDriver.PROPERTY_KEY_HOST));
hostProp.required = false;
hostProp.description = "Hostname";
DriverPropertyInfo portProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PORT, info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
portProp.description = "Port";
DriverPropertyInfo dbProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_DBNAME, info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
DriverPropertyInfo userProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_USER, info.getProperty(TSDBDriver.PROPERTY_KEY_USER));
userProp.required = true;
userProp.description = "User";
DriverPropertyInfo passwordProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PASSWORD, info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
passwordProp.description = "Password";
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
return propertyInfo;
}
protected Properties parseURL(String url, Properties defaults) {
Properties urlProps = (defaults != null) ? defaults : new Properties();
// parse properties
int beginningOfSlashes = url.indexOf("//");
int index = url.indexOf("?");
if (index != -1) {
String paramString = url.substring(index + 1, url.length());
url = url.substring(0, index);
StringTokenizer queryParams = new StringTokenizer(paramString, "&");
while (queryParams.hasMoreElements()) {
String parameterValuePair = queryParams.nextToken();
int indexOfEqual = parameterValuePair.indexOf("=");
String parameter = null;
String value = null;
if (indexOfEqual != -1) {
parameter = parameterValuePair.substring(0, indexOfEqual);
if (indexOfEqual + 1 < parameterValuePair.length()) {
value = parameterValuePair.substring(indexOfEqual + 1);
}
}
if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) {
urlProps.setProperty(parameter, value);
}
}
}
// parse Product Name
String dbProductName = url.substring(0, beginningOfSlashes);
dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
// parse dbname
url = url.substring(beginningOfSlashes + 2);
int indexOfSlash = url.indexOf("/");
if (indexOfSlash != -1) {
if (indexOfSlash + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1));
}
url = url.substring(0, indexOfSlash);
}
// parse port
int indexOfColon = url.indexOf(":");
if (indexOfColon != -1) {
if (indexOfColon + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1));
}
url = url.substring(0, indexOfColon);
}
// parse host
if (url != null && url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
return urlProps;
}
}


@ -16,10 +16,10 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
int colType = 0;
String colName = null;
int colSize = -1;
int colIndex = 0;
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
public int getColSize() {
return colSize;


@ -14,7 +14,6 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.*;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
@ -35,11 +34,10 @@ import java.util.*;
import java.util.concurrent.Executor;
public class TSDBConnection implements Connection {
protected Properties props = null;
private TSDBJNIConnector connector = null;
protected Properties props = null;
private String catalog = null;
private TSDBDatabaseMetaData dbMetaData = null;
@ -48,15 +46,20 @@ public class TSDBConnection implements Connection {
private int timeoutMilliseconds = 0;
private String tsCharSet = "";
private boolean batchFetch = false;
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME),
info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
if (batchLoad != null) {
this.batchFetch = Boolean.parseBoolean(batchLoad);
}
}
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
@ -197,12 +200,14 @@ public class TSDBConnection implements Connection {
}
public SQLWarning getWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
//todo: implement getWarnings according to the warning messages returned from TDengine
return null;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void clearWarnings() throws SQLException {
// left blank to support HikariCP connection
//todo: implement getWarnings according to the warning messages returned from TDengine
//todo: implement clearWarnings according to the warning messages returned from TDengine
}
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
@ -223,6 +228,14 @@ public class TSDBConnection implements Connection {
return this.prepareStatement(sql);
}
public Boolean getBatchFetch() {
return this.batchFetch;
}
public void setBatchFetch(Boolean batchFetch) {
this.batchFetch = batchFetch;
}
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}


@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
}
public int getDriverMajorVersion() {
return 0;
return 2;
}
public int getDriverMinorVersion() {


@ -14,13 +14,8 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.*;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.*;
import java.util.logging.Logger;
/**
@ -42,77 +37,59 @@ import java.util.logging.Logger;
* register it with the DriverManager. This means that a user can load and
* register a driver by doing Class.forName("foo.bah.Driver")
*/
public class TSDBDriver implements java.sql.Driver {
public class TSDBDriver extends AbstractTaosDriver {
@Deprecated
private static final String URL_PREFIX1 = "jdbc:TSDB://";
private static final String URL_PREFIX = "jdbc:TAOS://";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
/**
* Key used to retrieve the host value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_HOST = "host";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key used to retrieve the port number value from the properties instance
* passed to the driver.
*/
public static final String PROPERTY_KEY_PORT = "port";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
/**
* Key used to retrieve the user value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_USER = "user";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key for the configuration file directory of TSDB client in properties instance
*/
public static final String PROPERTY_KEY_CONFIG_DIR = "cfgdir";
/**
* Key for the timezone used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_TIME_ZONE = "timezone";
/**
* Key for the locale used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_LOCALE = "locale";
/**
* Key for the char encoding used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_CHARSET = "charset";
public static final String PROPERTY_KEY_PROTOCOL = "protocol";
/**
* Index for port coming out of parseHostPortPair().
* fetch data from native function in a batch model
*/
public final static int PORT_NUMBER_INDEX = 1;
/**
* Index for host coming out of parseHostPortPair().
*/
public final static int HOST_NAME_INDEX = 0;
public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch";
private TSDBDatabaseMetaData dbMetaData = null;
@ -124,74 +101,23 @@ public class TSDBDriver implements java.sql.Driver {
}
}
private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
/**
* @param cfgDirPath
* @return return the config dir
**/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return search the default config dir, if the config dir is not exist will return null
*/
private File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
}
if (!acceptsURL(url))
return null;
Properties props = null;
if ((props = parseURL(url, info)) == null) {
return null;
}
//load taos.cfg start
if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null) {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
}
loadTaosConfig(info);
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
(String) props.get(PROPERTY_KEY_TIME_ZONE));
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
(String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLWarning sqlWarning) {
@ -208,43 +134,15 @@ public class TSDBDriver implements java.sql.Driver {
}
/**
* Parses hostPortPair in the form of [host][:port] into an array, with the
* element of index HOST_NAME_INDEX being the host (or null if not specified),
* and the element of index PORT_NUMBER_INDEX being the port (or null if not
* specified).
*
* @param hostPortPair host and port in form of of [host][:port]
* @return array containing host and port as Strings
* @throws SQLException if a parse error occurs
* @param url the URL of the database
* @return <code>true</code> if this driver understands the given URL;
* <code>false</code> otherwise
* @throws SQLException if a database access error occurs or the url is {@code null}
*/
protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
String[] splitValues = new String[2];
int portIndex = hostPortPair.indexOf(":");
String hostname = null;
if (portIndex != -1) {
if ((portIndex + 1) < hostPortPair.length()) {
String portAsString = hostPortPair.substring(portIndex + 1);
hostname = hostPortPair.substring(0, portIndex);
splitValues[HOST_NAME_INDEX] = hostname;
splitValues[PORT_NUMBER_INDEX] = portAsString;
} else {
throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
}
} else {
splitValues[HOST_NAME_INDEX] = hostPortPair;
splitValues[PORT_NUMBER_INDEX] = null;
}
return splitValues;
}
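A short sketch of this helper's contract, with illustrative values:

String[] withPort = parseHostPortPair("node1:6030");
// withPort[HOST_NAME_INDEX]   -> "node1", withPort[PORT_NUMBER_INDEX] -> "6030"
String[] hostOnly = parseHostPortPair("node1");
// hostOnly[HOST_NAME_INDEX]   -> "node1", hostOnly[PORT_NUMBER_INDEX] -> null
// A trailing colon with no port ("node1:") raises SQLException("port is not proper!").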
public boolean acceptsURL(String url) throws SQLException {
return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is null"));
return (url != null && url.length() > 0 && url.trim().length() > 0) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1));
}
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
@ -252,133 +150,80 @@ public class TSDBDriver implements java.sql.Driver {
info = new Properties();
}
if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
if (acceptsURL(url)) {
info = parseURL(url, info);
}
DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
hostProp.required = true;
DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
userProp.required = true;
DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
return propertyInfo;
return getPropertyInfo(info);
}
/**
* example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
* example: jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password
*/
public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
@Override
public Properties parseURL(String url, Properties defaults) {
Properties urlProps = (defaults != null) ? defaults : new Properties();
if (url == null) {
if (url == null || url.length() <= 0 || url.trim().length() <= 0)
return null;
}
if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) {
if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1))
return null;
}
// parse properties
String urlForMeta = url;
String dbProductName = url.substring(url.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
int beginningOfSlashes = url.indexOf("//");
int index = url.indexOf("?");
if (index != -1) {
String paramString = url.substring(index + 1, url.length());
url = url.substring(0, index);
StringTokenizer queryParams = new StringTokenizer(paramString, "&");
while (queryParams.hasMoreElements()) {
String oneToken = queryParams.nextToken();
String[] pair = oneToken.split("=");
if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) {
urlProps.setProperty(pair[0].trim(), pair[1].trim());
}
}
}
// parse Product Name
String dbProductName = url.substring(0, beginningOfSlashes);
dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
// parse database name
url = url.substring(beginningOfSlashes + 2);
String host = url.substring(0, url.indexOf(":"));
url = url.substring(url.indexOf(":") + 1);
urlProps.setProperty(PROPERTY_KEY_HOST, host);
String port = url.substring(0, url.indexOf("/"));
urlProps.setProperty(PROPERTY_KEY_PORT, port);
url = url.substring(url.indexOf("/") + 1);
if (url.indexOf("?") != -1) {
String dbName = url.substring(0, url.indexOf("?"));
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
url = url.trim().substring(url.indexOf("?") + 1);
} else {
// without user & password so return
if (!url.trim().isEmpty()) {
String dbName = url.trim();
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
int indexOfSlash = url.indexOf("/");
if (indexOfSlash != -1) {
if (indexOfSlash + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1));
}
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
return urlProps;
url = url.substring(0, indexOfSlash);
}
String user = "";
if (url.indexOf("&") == -1) {
String[] kvPair = url.trim().split("=");
if (kvPair.length == 2) {
setPropertyValue(urlProps, kvPair);
return urlProps;
// parse port
int indexOfColon = url.indexOf(":");
if (indexOfColon != -1) {
if (indexOfColon + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1));
}
url = url.substring(0, indexOfColon);
}
String[] queryStrings = url.trim().split("&");
for (String queryStr : queryStrings) {
String[] kvPair = queryStr.trim().split("=");
if (kvPair.length < 2) {
continue;
}
setPropertyValue(urlProps, kvPair);
if (url != null && url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
return urlProps;
}
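To make the parsing concrete, a rough sketch of what the method yields for the example URL in the Javadoc above (property names are the constants defined earlier in this class):

Properties parsed = new TSDBDriver().parseURL(
        "jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password", new Properties());
// Expected, approximately:
//   PROPERTY_KEY_USER     -> "root"            (from the query string)
//   PROPERTY_KEY_PASSWORD -> "your_password"
//   PROPERTY_KEY_HOST     -> "127.0.0.1"
//   PROPERTY_KEY_PORT     -> "0"
//   PROPERTY_KEY_DBNAME   -> "db"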
public void setPropertyValue(Properties property, String[] keyValuePair) {
switch (keyValuePair[0].toLowerCase()) {
case PROPERTY_KEY_USER:
property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
break;
case PROPERTY_KEY_PASSWORD:
property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
break;
case PROPERTY_KEY_TIME_ZONE:
property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
break;
case PROPERTY_KEY_LOCALE:
property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
break;
case PROPERTY_KEY_CHARSET:
property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
break;
case PROPERTY_KEY_CONFIG_DIR:
property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
break;
}
}
public int getMajorVersion() {
return 1;
return 2;
}
public int getMinorVersion() {
return 1;
return 0;
}
public boolean jdbcCompliant() {
@ -389,33 +234,4 @@ public class TSDBDriver implements java.sql.Driver {
return null;
}
/**
* Returns the host property
*
* @param props the java.util.Properties instance to retrieve the hostname from.
* @return the host
*/
public String host(Properties props) {
return props.getProperty(PROPERTY_KEY_HOST, "localhost");
}
/**
* Returns the port number property
*
* @param props the properties to get the port number from
* @return the port number
*/
public int port(Properties props) {
return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
}
/**
* Returns the database property from <code>props</code>
*
* @param props the Properties to look for the database property.
* @return the database name.
*/
public String database(Properties props) {
return props.getProperty(PROPERTY_KEY_DBNAME);
}
}
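A minimal end-to-end sketch of using this driver through DriverManager; the database, table, and credentials are placeholders, and the URL prefix follows the examples in the Javadoc above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JdbcQuickStart {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        String url = "jdbc:TAOS://127.0.0.1:0/test?user=root&password=taosdata"; // placeholders
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from weather")) {        // placeholder table
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}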

View File

@ -111,8 +111,8 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
// close previous result set if the user forgets to invoke the
// free method to close previous result set.
// close previous result set if the user forgets to invoke the
// free method to close previous result set.
if (!this.isResultsetClosed) {
freeResultSet(taosResultSetPointer);
}
@ -122,7 +122,7 @@ public class TSDBJNIConnector {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
} catch (Exception e) {
e.printStackTrace();
this.freeResultSet(pSql);
this.freeResultSetImp(this.taos, pSql);
throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding"));
}
@ -131,7 +131,7 @@ public class TSDBJNIConnector {
affectedRows = -1;
String msg = this.getErrMsg(pSql);
this.freeResultSet(pSql);
this.freeResultSetImp(this.taos, pSql);
throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code);
}
@ -173,7 +173,7 @@ public class TSDBJNIConnector {
private native long getResultSetImp(long connection, long pSql);
public boolean isUpdateQuery(long pSql) {
return isUpdateQueryImp(this.taos, pSql) == 1? true:false;
return isUpdateQueryImp(this.taos, pSql) == 1 ? true : false;
}
private native long isUpdateQueryImp(long connection, long pSql);
@ -243,6 +243,11 @@ public class TSDBJNIConnector {
private native int fetchRowImp(long connection, long resultSet, TSDBResultSetRowData rowData);
public int fetchBlock(long resultSet, TSDBResultSetBlockData blockData) {
return this.fetchBlockImp(this.taos, resultSet, blockData);
}
private native int fetchBlockImp(long connection, long resultSet, TSDBResultSetBlockData blockData);
/**
* Execute close operation from C to release connection pointer by JNI
*
@ -274,7 +279,7 @@ public class TSDBJNIConnector {
* Consume a subscription
*/
long consume(long subscription) {
return this.consumeImp(subscription);
return this.consumeImp(subscription);
}
private native long consumeImp(long subscription);

View File

@ -47,10 +47,14 @@ public class TSDBResultSet implements ResultSet {
private List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>();
private TSDBResultSetRowData rowData;
private TSDBResultSetBlockData blockData;
private boolean batchFetch = false;
private boolean lastWasNull = false;
private final int COLUMN_INDEX_START_VALUE = 1;
private int rowIndex = 0;
public TSDBJNIConnector getJniConnector() {
return jniConnector;
}
@ -67,6 +71,14 @@ public class TSDBResultSet implements ResultSet {
this.resultSetPointer = resultSetPointer;
}
public void setBatchFetch(boolean batchFetch) {
this.batchFetch = batchFetch;
}
public Boolean getBatchFetch() {
return this.batchFetch;
}
public List<ColumnMetaData> getColumnMetaDataList() {
return columnMetaDataList;
}
@ -94,8 +106,8 @@ public class TSDBResultSet implements ResultSet {
public TSDBResultSet() {
}
public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException {
this.jniConnector = connecter;
public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException {
this.jniConnector = connector;
this.resultSetPointer = resultSetPointer;
int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
@ -107,6 +119,7 @@ public class TSDBResultSet implements ResultSet {
}
this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size());
this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size());
}
public <T> T unwrap(Class<T> iface) throws SQLException {
@ -118,21 +131,42 @@ public class TSDBResultSet implements ResultSet {
}
public boolean next() throws SQLException {
if (rowData != null) {
this.rowData.clear();
}
if (this.getBatchFetch()) {
if (this.blockData.forward()) {
return true;
}
int code = this.jniConnector.fetchBlock(this.resultSetPointer, this.blockData);
this.blockData.reset();
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
}
int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
} else {
return true;
} else {
if (rowData != null) {
this.rowData.clear();
}
int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
} else {
return true;
}
}
}
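Batch fetch is governed by the "batchfetch" property (PROPERTY_KEY_BATCH_LOAD): when it is enabled, next() first walks rows inside the current block via blockData.forward() and only calls fetchBlock() when the block is exhausted; otherwise it takes the row-at-a-time fetchRow() path above. A sketch of opting in, assuming the connection wires the property through as the statement code later in this diff suggests (URL, credentials, and table name are placeholders):

Properties props = new Properties();
props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");          // placeholder user
props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");  // placeholder password
props.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");    // fetch whole blocks over JNI
try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:0/test", props);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from weather")) {  // placeholder table
    while (rs.next()) {                                            // iterates within a block
        System.out.println(rs.getString(1));
    }
}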
@ -155,21 +189,30 @@ public class TSDBResultSet implements ResultSet {
String res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getString(colIndex);
}
return res;
}
public boolean getBoolean(int columnIndex) throws SQLException {
boolean res = false;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
} else {
return this.blockData.getBoolean(colIndex);
}
return res;
}
@ -177,66 +220,91 @@ public class TSDBResultSet implements ResultSet {
byte res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (byte) this.blockData.getInt(colIndex);
}
return res;
}
public short getShort(int columnIndex) throws SQLException {
short res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (short) this.blockData.getInt(colIndex);
}
return res;
}
public int getInt(int columnIndex) throws SQLException {
int res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getInt(colIndex);
}
return res;
}
public long getLong(int columnIndex) throws SQLException {
long res = 0L;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getLong(colIndex);
}
return res;
}
public float getFloat(int columnIndex) throws SQLException {
float res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (float) this.blockData.getDouble(colIndex);
}
return res;
}
public double getDouble(int columnIndex) throws SQLException {
double res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getDouble(colIndex);
}
return res;
}
/*
@ -249,25 +317,11 @@ public class TSDBResultSet implements ResultSet {
*/
@Deprecated
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
BigDecimal res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
}
return res;
return new BigDecimal(getLong(columnIndex));
}
public byte[] getBytes(int columnIndex) throws SQLException {
byte[] res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes();
}
return res;
return getString(columnIndex).getBytes();
}
public Date getDate(int columnIndex) throws SQLException {
@ -284,11 +338,15 @@ public class TSDBResultSet implements ResultSet {
Timestamp res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getTimestamp(colIndex);
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getTimestamp(colIndex);
}
return res;
} else {
return this.blockData.getTimestamp(colIndex);
}
return res;
}
public InputStream getAsciiStream(int columnIndex) throws SQLException {
@ -400,8 +458,12 @@ public class TSDBResultSet implements ResultSet {
public Object getObject(int columnIndex) throws SQLException {
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
return this.rowData.get(colIndex);
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
return this.rowData.get(colIndex);
} else {
return this.blockData.get(colIndex);
}
}
public Object getObject(String columnLabel) throws SQLException {
@ -433,8 +495,12 @@ public class TSDBResultSet implements ResultSet {
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
} else {
return new BigDecimal(this.blockData.getLong(colIndex));
}
}
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {

View File

@ -0,0 +1,497 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.nio.ShortBuffer;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
private List<ColumnMetaData> columnMetaDataList;
private ArrayList<Object> colData = null;
public TSDBResultSetBlockData(List<ColumnMetaData> colMeta, int numOfCols) {
this.columnMetaDataList = colMeta;
this.colData = new ArrayList<Object>(numOfCols);
}
public TSDBResultSetBlockData() {
this.colData = new ArrayList<Object>();
}
public void clear() {
int size = 0;
if (this.colData != null) {
size = this.colData.size();
this.colData.clear();
}
setNumOfCols(size);
}
public int getNumOfRows() {
return this.numOfRows;
}
public void setNumOfRows(int numOfRows) {
this.numOfRows = numOfRows;
}
public int getNumOfCols() {
return this.colData.size();
}
public void setNumOfCols(int numOfCols) {
this.colData = new ArrayList<Object>(numOfCols);
this.colData.addAll(Collections.nCopies(numOfCols, null));
}
public boolean hasMore() {
return this.rowIndex < this.numOfRows;
}
public boolean forward() {
if (this.rowIndex > this.numOfRows) {
return false;
}
return ((++this.rowIndex) < this.numOfRows);
}
public void reset() {
this.rowIndex = 0;
}
public void setBoolean(int col, boolean value) {
colData.set(col, value);
}
public void setByteArray(int col, int length, byte[] value) {
try {
switch (this.columnMetaDataList.get(col).getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN).asCharBuffer();
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
ShortBuffer sb = buf.order(ByteOrder.LITTLE_ENDIAN).asShortBuffer();
this.colData.set(col, sb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_INT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
IntBuffer ib = buf.order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
this.colData.set(col, ib);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
this.colData.set(col, lb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
FloatBuffer fb = buf.order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer();
this.colData.set(col, fb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
DoubleBuffer db = buf.order(ByteOrder.LITTLE_ENDIAN).asDoubleBuffer();
this.colData.set(col, db);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
this.colData.set(col, lb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
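Each column of a block arrives from JNI as one contiguous little-endian byte array, and setByteArray() merely wraps it in the matching NIO buffer view. A small illustrative sketch for an INT column:

byte[] raw = {1, 0, 0, 0, 2, 0, 0, 0};                                            // two int32 values
IntBuffer ib = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
int first = ib.get(0);   // 1
int second = ib.get(1);  // 2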
private static class NullType {
private static final byte NULL_BOOL_VAL = 0x2;
private static final String NULL_STR = "null";
public String toString() {
return NullType.NULL_STR;
}
public static boolean isBooleanNull(byte val) {
return val == NullType.NULL_BOOL_VAL;
}
private static boolean isTinyIntNull(byte val) {
return val == Byte.MIN_VALUE;
}
private static boolean isSmallIntNull(short val) {
return val == Short.MIN_VALUE;
}
private static boolean isIntNull(int val) {
return val == Integer.MIN_VALUE;
}
private static boolean isBigIntNull(long val) {
return val == Long.MIN_VALUE;
}
private static boolean isFloatNull(float val) {
return Float.isNaN(val);
}
private static boolean isDoubleNull(double val) {
return Double.isNaN(val);
}
private static boolean isBinaryNull(byte[] val, int length) {
if (length != Byte.BYTES) {
return false;
}
return val[0] == (byte) 0xFF; // compare as a byte; 0xFF as an int would never match
}
private static boolean isNcharNull(byte[] val, int length) {
if (length != Integer.BYTES) {
return false;
}
return (val[0] & val[1] & val[2] & val[3] & 0xFF) == 0xFF; // mask to a byte before comparing
}
}
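For quick reference, the per-type NULL sentinels that the checks above recognize inside a block:

// BOOL              -> byte 0x2
// TINYINT           -> Byte.MIN_VALUE
// SMALLINT          -> Short.MIN_VALUE
// INT               -> Integer.MIN_VALUE
// BIGINT/TIMESTAMP  -> Long.MIN_VALUE
// FLOAT/DOUBLE      -> NaN
// BINARY            -> a single 0xFF byte
// NCHAR             -> four 0xFF bytes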
/**
* The original column type may not be a string type, but it is converted to a
* string by this method.
*
* @param col column index
* @return the column value as a string, or "null" when the value is SQL NULL
* @throws SQLException
*/
public String getString(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return new NullType().toString();
}
return obj.toString();
}
public int getInt(int col) {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return ((Long) obj).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return ((Double) obj).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Integer.parseInt((String) obj);
}
}
return 0;
}
public boolean getBoolean(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return Boolean.FALSE;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return ((int) obj == 0L) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (((Long) obj) == 0L) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return (((Double) obj) == 0) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
if ("TRUE".compareToIgnoreCase((String) obj) == 0) {
return Boolean.TRUE;
} else if ("FALSE".compareToIgnoreCase((String) obj) == 0) {
return Boolean.FALSE;
} else {
throw new SQLDataException();
}
}
}
return Boolean.FALSE;
}
public long getLong(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (long) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return ((Double) obj).longValue();
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Long.parseLong((String) obj);
}
}
return 0;
}
public Timestamp getTimestamp(int col) {
try {
return new Timestamp(getLong(col));
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
public double getDouble(int col) {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (long) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return (double) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Double.parseDouble((String) obj);
}
}
return 0;
}
public Object get(int col) {
int fieldSize = this.columnMetaDataList.get(col).getColSize();
switch (this.columnMetaDataList.get(col).getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
byte val = bb.get(this.rowIndex);
if (NullType.isBooleanNull(val)) {
return null;
}
return (val == 0x0) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
byte val = bb.get(this.rowIndex);
if (NullType.isTinyIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
ShortBuffer sb = (ShortBuffer) this.colData.get(col);
short val = sb.get(this.rowIndex);
if (NullType.isSmallIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_INT: {
IntBuffer ib = (IntBuffer) this.colData.get(col);
int val = ib.get(this.rowIndex);
if (NullType.isIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
LongBuffer lb = (LongBuffer) this.colData.get(col);
long val = lb.get(this.rowIndex);
if (NullType.isBigIntNull(val)) {
return null;
}
return (long) val;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
FloatBuffer fb = (FloatBuffer) this.colData.get(col);
float val = fb.get(this.rowIndex);
if (NullType.isFloatNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
DoubleBuffer lb = (DoubleBuffer) this.colData.get(col);
double val = lb.get(this.rowIndex);
if (NullType.isDoubleNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
bb.position(fieldSize * this.rowIndex);
int length = bb.getShort();
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isBinaryNull(dest, length)) {
return null;
}
return new String(dest);
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
bb.position(fieldSize * this.rowIndex);
int length = bb.getShort();
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isNcharNull(dest, length)) {
return null;
}
try {
String ss = TaosGlobalConfig.getCharset();
return new String(dest, ss);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
}
}
return 0;
}
}

View File

@ -218,5 +218,4 @@ public class TSDBResultSetRowData {
public void setData(ArrayList<Object> data) {
this.data = (ArrayList<Object>) data.clone();
}
}

View File

@ -19,7 +19,7 @@ import java.util.ArrayList;
import java.util.List;
public class TSDBStatement implements Statement {
private TSDBJNIConnector connecter = null;
private TSDBJNIConnector connector = null;
/**
* To store batched commands
@ -45,9 +45,9 @@ public class TSDBStatement implements Statement {
this.connection = connection;
}
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
this.connection = connection;
this.connecter = connecter;
this.connector = connector;
this.isClosed = false;
}
@ -65,25 +65,27 @@ public class TSDBStatement implements Statement {
}
// TODO make sure it is not an update query
pSql = this.connecter.executeQuery(sql);
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
// create/insert/update/delete/alter
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
return null;
}
if (!this.connecter.isUpdateQuery(pSql)) {
return new TSDBResultSet(this.connecter, resultSetPointer);
if (!this.connector.isUpdateQuery(pSql)) {
TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
} else {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
return null;
}
@ -95,28 +97,28 @@ public class TSDBStatement implements Statement {
}
// TODO check if the current query is an update query
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
this.affectedRows = this.connecter.getAffectedRows(pSql);
this.connecter.freeResultSet(pSql);
this.affectedRows = this.connector.getAffectedRows(pSql);
this.connector.freeResultSet(pSql);
return this.affectedRows;
}
public String getErrorMsg(long pSql) {
return this.connecter.getErrMsg(pSql);
return this.connector.getErrMsg(pSql);
}
public void close() throws SQLException {
if (!isClosed) {
if (!this.connecter.isResultsetClosed()) {
this.connecter.freeResultSet();
if (!this.connector.isResultsetClosed()) {
this.connector.freeResultSet();
}
isClosed = true;
}
@ -136,7 +138,7 @@ public class TSDBStatement implements Statement {
}
public void setMaxRows(int max) throws SQLException {
// always set maxRows to zero, meaning unlimitted rows in a resultSet
// always set maxRows to zero, meaning unlimited rows in a resultSet
}
public void setEscapeProcessing(boolean enable) throws SQLException {
@ -172,15 +174,15 @@ public class TSDBStatement implements Statement {
throw new SQLException("Invalid method call on a closed statement.");
}
boolean res = true;
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
// no result set is retrieved
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
res = false;
}
@ -191,10 +193,10 @@ public class TSDBStatement implements Statement {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
long resultSetPointer = connecter.getResultSet();
long resultSetPointer = connector.getResultSet();
TSDBResultSet resSet = null;
if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
resSet = new TSDBResultSet(connecter, resultSetPointer);
resSet = new TSDBResultSet(connector, resultSetPointer);
}
return resSet;
}
@ -267,7 +269,7 @@ public class TSDBStatement implements Statement {
}
public Connection getConnection() throws SQLException {
if (this.connecter != null)
if (this.connector != null)
return this.connection;
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}

View File

@ -0,0 +1,319 @@
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import java.sql.*;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
public class RestfulConnection implements Connection {
private final String host;
private final int port;
private final Properties props;
private final String database;
private final String url;
public RestfulConnection(String host, String port, Properties props, String database, String url) {
this.host = host;
this.port = Integer.parseInt(port);
this.props = props;
this.database = database;
this.url = url;
}
@Override
public Statement createStatement() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed."));
return new RestfulStatement(this, this.database);
}
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return null;
}
@Override
public String nativeSQL(String sql) throws SQLException {
return null;
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
}
@Override
public boolean getAutoCommit() throws SQLException {
return false;
}
@Override
public void commit() throws SQLException {
}
@Override
public void rollback() throws SQLException {
}
@Override
public void close() throws SQLException {
}
@Override
public boolean isClosed() throws SQLException {
return false;
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
//TODO: RestfulDatabaseMetaData is not implemented
return new RestfulDatabaseMetaData();
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
}
@Override
public void setCatalog(String catalog) throws SQLException {
}
@Override
public String getCatalog() throws SQLException {
return null;
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
}
@Override
public int getTransactionIsolation() throws SQLException {
return 0;
}
@Override
public SQLWarning getWarnings() throws SQLException {
return null;
}
@Override
public void clearWarnings() throws SQLException {
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
return null;
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
}
@Override
public void setHoldability(int holdability) throws SQLException {
}
@Override
public int getHoldability() throws SQLException {
return 0;
}
@Override
public Savepoint setSavepoint() throws SQLException {
return null;
}
@Override
public Savepoint setSavepoint(String name) throws SQLException {
return null;
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
return null;
}
@Override
public Clob createClob() throws SQLException {
return null;
}
@Override
public Blob createBlob() throws SQLException {
return null;
}
@Override
public NClob createNClob() throws SQLException {
return null;
}
@Override
public SQLXML createSQLXML() throws SQLException {
return null;
}
@Override
public boolean isValid(int timeout) throws SQLException {
return false;
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
}
@Override
public String getClientInfo(String name) throws SQLException {
return null;
}
@Override
public Properties getClientInfo() throws SQLException {
return null;
}
@Override
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
return null;
}
@Override
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
return null;
}
@Override
public void setSchema(String schema) throws SQLException {
}
@Override
public String getSchema() throws SQLException {
return null;
}
@Override
public void abort(Executor executor) throws SQLException {
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
}
@Override
public int getNetworkTimeout() throws SQLException {
return 0;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
public String getHost() {
return host;
}
public int getPort() {
return port;
}
public Properties getProps() {
return props;
}
public String getDatabase() {
return database;
}
public String getUrl() {
return url;
}
}
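This RESTful connection is still largely a stub (most capabilities return defaults), but a hedged sketch of how such a connection would typically be obtained follows; the jdbc:TAOS-RS:// prefix and port 6041 are assumptions about the accompanying RestfulDriver, not confirmed by this diff:

String url = "jdbc:TAOS-RS://127.0.0.1:6041/test?user=root&password=taosdata"; // assumed prefix and port
try (Connection conn = DriverManager.getConnection(url);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from weather")) {               // placeholder table
    while (rs.next()) {
        System.out.println(rs.getString(1));
    }
}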

View File

@ -0,0 +1,886 @@
package com.taosdata.jdbc.rs;
import java.sql.*;
public class RestfulDatabaseMetaData implements DatabaseMetaData {
@Override
public boolean allProceduresAreCallable() throws SQLException {
return false;
}
@Override
public boolean allTablesAreSelectable() throws SQLException {
return false;
}
@Override
public String getURL() throws SQLException {
return null;
}
@Override
public String getUserName() throws SQLException {
return null;
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedHigh() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedLow() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedAtStart() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedAtEnd() throws SQLException {
return false;
}
@Override
public String getDatabaseProductName() throws SQLException {
return null;
}
@Override
public String getDatabaseProductVersion() throws SQLException {
return null;
}
@Override
public String getDriverName() throws SQLException {
return null;
}
@Override
public String getDriverVersion() throws SQLException {
return null;
}
@Override
public int getDriverMajorVersion() {
return 0;
}
@Override
public int getDriverMinorVersion() {
return 0;
}
@Override
public boolean usesLocalFiles() throws SQLException {
return false;
}
@Override
public boolean usesLocalFilePerTable() throws SQLException {
return false;
}
@Override
public boolean supportsMixedCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesUpperCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesLowerCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesMixedCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public String getIdentifierQuoteString() throws SQLException {
return null;
}
@Override
public String getSQLKeywords() throws SQLException {
return null;
}
@Override
public String getNumericFunctions() throws SQLException {
return null;
}
@Override
public String getStringFunctions() throws SQLException {
return null;
}
@Override
public String getSystemFunctions() throws SQLException {
return null;
}
@Override
public String getTimeDateFunctions() throws SQLException {
return null;
}
@Override
public String getSearchStringEscape() throws SQLException {
return null;
}
@Override
public String getExtraNameCharacters() throws SQLException {
return null;
}
@Override
public boolean supportsAlterTableWithAddColumn() throws SQLException {
return false;
}
@Override
public boolean supportsAlterTableWithDropColumn() throws SQLException {
return false;
}
@Override
public boolean supportsColumnAliasing() throws SQLException {
return false;
}
@Override
public boolean nullPlusNonNullIsNull() throws SQLException {
return false;
}
@Override
public boolean supportsConvert() throws SQLException {
return false;
}
@Override
public boolean supportsConvert(int fromType, int toType) throws SQLException {
return false;
}
@Override
public boolean supportsTableCorrelationNames() throws SQLException {
return false;
}
@Override
public boolean supportsDifferentTableCorrelationNames() throws SQLException {
return false;
}
@Override
public boolean supportsExpressionsInOrderBy() throws SQLException {
return false;
}
@Override
public boolean supportsOrderByUnrelated() throws SQLException {
return false;
}
@Override
public boolean supportsGroupBy() throws SQLException {
return false;
}
@Override
public boolean supportsGroupByUnrelated() throws SQLException {
return false;
}
@Override
public boolean supportsGroupByBeyondSelect() throws SQLException {
return false;
}
@Override
public boolean supportsLikeEscapeClause() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleResultSets() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsNonNullableColumns() throws SQLException {
return false;
}
@Override
public boolean supportsMinimumSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsCoreSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsExtendedSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92EntryLevelSQL() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92IntermediateSQL() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92FullSQL() throws SQLException {
return false;
}
@Override
public boolean supportsIntegrityEnhancementFacility() throws SQLException {
return false;
}
@Override
public boolean supportsOuterJoins() throws SQLException {
return false;
}
@Override
public boolean supportsFullOuterJoins() throws SQLException {
return false;
}
@Override
public boolean supportsLimitedOuterJoins() throws SQLException {
return false;
}
@Override
public String getSchemaTerm() throws SQLException {
return null;
}
@Override
public String getProcedureTerm() throws SQLException {
return null;
}
@Override
public String getCatalogTerm() throws SQLException {
return null;
}
@Override
public boolean isCatalogAtStart() throws SQLException {
return false;
}
@Override
public String getCatalogSeparator() throws SQLException {
return null;
}
@Override
public boolean supportsSchemasInDataManipulation() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInProcedureCalls() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInTableDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInIndexDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInDataManipulation() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInProcedureCalls() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInTableDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsPositionedDelete() throws SQLException {
return false;
}
@Override
public boolean supportsPositionedUpdate() throws SQLException {
return false;
}
@Override
public boolean supportsSelectForUpdate() throws SQLException {
return false;
}
@Override
public boolean supportsStoredProcedures() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInComparisons() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInExists() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInIns() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInQuantifieds() throws SQLException {
return false;
}
@Override
public boolean supportsCorrelatedSubqueries() throws SQLException {
return false;
}
@Override
public boolean supportsUnion() throws SQLException {
return false;
}
@Override
public boolean supportsUnionAll() throws SQLException {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
return false;
}
@Override
public int getMaxBinaryLiteralLength() throws SQLException {
return 0;
}
@Override
public int getMaxCharLiteralLength() throws SQLException {
return 0;
}
@Override
public int getMaxColumnNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInGroupBy() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInIndex() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInOrderBy() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInSelect() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInTable() throws SQLException {
return 0;
}
@Override
public int getMaxConnections() throws SQLException {
return 0;
}
@Override
public int getMaxCursorNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxIndexLength() throws SQLException {
return 0;
}
@Override
public int getMaxSchemaNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxProcedureNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxCatalogNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxRowSize() throws SQLException {
return 0;
}
@Override
public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
return false;
}
@Override
public int getMaxStatementLength() throws SQLException {
return 0;
}
@Override
public int getMaxStatements() throws SQLException {
return 0;
}
@Override
public int getMaxTableNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxTablesInSelect() throws SQLException {
return 0;
}
@Override
public int getMaxUserNameLength() throws SQLException {
return 0;
}
@Override
public int getDefaultTransactionIsolation() throws SQLException {
return 0;
}
@Override
public boolean supportsTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
return false;
}
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
return false;
}
@Override
public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
return false;
}
@Override
public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
return false;
}
@Override
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
return null;
}
@Override
public ResultSet getSchemas() throws SQLException {
return null;
}
@Override
public ResultSet getCatalogs() throws SQLException {
return null;
}
@Override
public ResultSet getTableTypes() throws SQLException {
return null;
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException {
return null;
}
@Override
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
return null;
}
@Override
public ResultSet getTypeInfo() throws SQLException {
return null;
}
@Override
public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException {
return null;
}
@Override
public boolean supportsResultSetType(int type) throws SQLException {
return false;
}
@Override
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
return false;
}
@Override
public boolean ownUpdatesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean ownDeletesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean ownInsertsAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersUpdatesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersDeletesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersInsertsAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean updatesAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean deletesAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean insertsAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean supportsBatchUpdates() throws SQLException {
return false;
}
@Override
public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException {
return null;
}
@Override
public Connection getConnection() throws SQLException {
return null;
}
@Override
public boolean supportsSavepoints() throws SQLException {
return false;
}
@Override
public boolean supportsNamedParameters() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleOpenResults() throws SQLException {
return false;
}
@Override
public boolean supportsGetGeneratedKeys() throws SQLException {
return false;
}
@Override
public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException {
return null;
}
@Override
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
return false;
}
@Override
public int getResultSetHoldability() throws SQLException {
return 0;
}
@Override
public int getDatabaseMajorVersion() throws SQLException {
return 0;
}
@Override
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
@Override
public int getJDBCMajorVersion() throws SQLException {
return 0;
}
@Override
public int getJDBCMinorVersion() throws SQLException {
return 0;
}
@Override
public int getSQLStateType() throws SQLException {
return 0;
}
@Override
public boolean locatorsUpdateCopy() throws SQLException {
return false;
}
@Override
public boolean supportsStatementPooling() throws SQLException {
return false;
}
@Override
public RowIdLifetime getRowIdLifetime() throws SQLException {
return null;
}
@Override
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
}
@Override
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
return false;
}
@Override
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
return false;
}
@Override
public ResultSet getClientInfoProperties() throws SQLException {
return null;
}
@Override
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public boolean generatedKeyAlwaysReturned() throws SQLException {
return false;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -0,0 +1,91 @@
package com.taosdata.jdbc.rs;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.AbstractTaosDriver;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;
import java.sql.*;
import java.util.Properties;
import java.util.logging.Logger;
public class RestfulDriver extends AbstractTaosDriver {
private static final String URL_PREFIX = "jdbc:TAOS-RS://";
static {
try {
DriverManager.registerDriver(new RestfulDriver());
} catch (SQLException e) {
throw new RuntimeException(TSDBConstants.WrapErrMsg("can not register Restful JDBC driver"), e);
}
}
@Override
public Connection connect(String url, Properties info) throws SQLException {
// throw SQLException if url is null
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
// return null if the url is not accepted
if (!acceptsURL(url))
return null;
Properties props = parseURL(url, info);
String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
String database = props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME);
String loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/"
+ props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
String result = HttpClientPoolUtil.execute(loginUrl);
JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status");
if (!status.equals("succ")) {
throw new SQLException(jsonResult.getString("desc"));
}
return new RestfulConnection(host, port, props, database, url);
}
@Override
public boolean acceptsURL(String url) throws SQLException {
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is null"));
return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
if (info == null) {
info = new Properties();
}
if (acceptsURL(url)) {
info = parseURL(url, info);
}
return getPropertyInfo(info);
}
@Override
public int getMajorVersion() {
return 2;
}
@Override
public int getMinorVersion() {
return 0;
}
@Override
public boolean jdbcCompliant() {
return false;
}
@Override
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return null;
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,129 @@
package com.taosdata.jdbc.rs;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;
public class RestfulResultSetMetaData implements ResultSetMetaData {
private List<String> fields;
public RestfulResultSetMetaData(List<String> fields) {
this.fields = fields;
}
@Override
public int getColumnCount() throws SQLException {
return fields.size();
}
@Override
public boolean isAutoIncrement(int column) throws SQLException {
return false;
}
@Override
public boolean isCaseSensitive(int column) throws SQLException {
return false;
}
@Override
public boolean isSearchable(int column) throws SQLException {
return false;
}
@Override
public boolean isCurrency(int column) throws SQLException {
return false;
}
@Override
public int isNullable(int column) throws SQLException {
return 0;
}
@Override
public boolean isSigned(int column) throws SQLException {
return false;
}
@Override
public int getColumnDisplaySize(int column) throws SQLException {
return 0;
}
@Override
public String getColumnLabel(int column) throws SQLException {
return fields.get(column - 1);
}
@Override
public String getColumnName(int column) throws SQLException {
return null;
}
@Override
public String getSchemaName(int column) throws SQLException {
return null;
}
@Override
public int getPrecision(int column) throws SQLException {
return 0;
}
@Override
public int getScale(int column) throws SQLException {
return 0;
}
@Override
public String getTableName(int column) throws SQLException {
return null;
}
@Override
public String getCatalogName(int column) throws SQLException {
return null;
}
@Override
public int getColumnType(int column) throws SQLException {
return 0;
}
@Override
public String getColumnTypeName(int column) throws SQLException {
return null;
}
@Override
public boolean isReadOnly(int column) throws SQLException {
return false;
}
@Override
public boolean isWritable(int column) throws SQLException {
return false;
}
@Override
public boolean isDefinitelyWritable(int column) throws SQLException {
return false;
}
@Override
public String getColumnClassName(int column) throws SQLException {
return null;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -0,0 +1,280 @@
package com.taosdata.jdbc.rs;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;
import java.sql.*;
import java.util.Arrays;
import java.util.List;
public class RestfulStatement implements Statement {
private final String catalog;
private final RestfulConnection conn;
public RestfulStatement(RestfulConnection c, String catalog) {
this.conn = c;
this.catalog = catalog;
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
final String url = "http://" + conn.getHost() + ":"+conn.getPort()+"/rest/sql";
String result = HttpClientPoolUtil.execute(url, sql);
String fields = "";
List<String> words = Arrays.asList(sql.split(" "));
if (words.get(0).equalsIgnoreCase("select")) {
int index = 0;
if (words.contains("from")) {
index = words.indexOf("from");
}
if (words.contains("FROM")) {
index = words.indexOf("FROM");
}
fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1));
}
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
}
String dataStr = jsonObject.getString("data");
if ("use".equalsIgnoreCase(fields.split(" ")[0])) {
return new RestfulResultSet(dataStr, "");
}
JSONObject jsonField = JSON.parseObject(fields);
if (jsonField == null) {
return new RestfulResultSet(dataStr, "");
}
if (jsonField.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonField.getString("desc") + "\n" +
"error code: " + jsonField.getString("code")));
}
String fieldData = jsonField.getString("data");
return new RestfulResultSet(dataStr, fieldData);
}
@Override
public int executeUpdate(String sql) throws SQLException {
return 0;
}
@Override
public void close() throws SQLException {
}
@Override
public int getMaxFieldSize() throws SQLException {
return 0;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
}
@Override
public int getMaxRows() throws SQLException {
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
}
@Override
public int getQueryTimeout() throws SQLException {
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
}
@Override
public void cancel() throws SQLException {
}
@Override
public SQLWarning getWarnings() throws SQLException {
return null;
}
@Override
public void clearWarnings() throws SQLException {
}
@Override
public void setCursorName(String name) throws SQLException {
}
@Override
public boolean execute(String sql) throws SQLException {
return false;
}
@Override
public ResultSet getResultSet() throws SQLException {
return null;
}
@Override
public int getUpdateCount() throws SQLException {
return 0;
}
@Override
public boolean getMoreResults() throws SQLException {
return false;
}
@Override
public void setFetchDirection(int direction) throws SQLException {
}
@Override
public int getFetchDirection() throws SQLException {
return 0;
}
@Override
public void setFetchSize(int rows) throws SQLException {
}
@Override
public int getFetchSize() throws SQLException {
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
return 0;
}
@Override
public int getResultSetType() throws SQLException {
return 0;
}
@Override
public void addBatch(String sql) throws SQLException {
}
@Override
public void clearBatch() throws SQLException {
}
@Override
public int[] executeBatch() throws SQLException {
return new int[0];
}
@Override
public Connection getConnection() throws SQLException {
return null;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
return false;
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
return null;
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
return 0;
}
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
return 0;
}
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
return 0;
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
return false;
}
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
return false;
}
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
return false;
}
@Override
public int getResultSetHoldability() throws SQLException {
return 0;
}
@Override
public boolean isClosed() throws SQLException {
return false;
}
@Override
public void setPoolable(boolean poolable) throws SQLException {
}
@Override
public boolean isPoolable() throws SQLException {
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
}
@Override
public boolean isCloseOnCompletion() throws SQLException {
return false;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -0,0 +1,222 @@
package com.taosdata.jdbc.rs.util;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
public static CloseableHttpClient httpClient = null;
/**
* default content type
*/
private static final String DEFAULT_CONTENT_TYPE = "application/json";
/**
* default request timeout
*/
private static final int DEFAULT_TIME_OUT = 15000;
private static final int count = 32;
private static final int totalCount = 1000;
private static final int Http_Default_Keep_Time = 15000;
/**
* initialize the connection pool
*/
public static synchronized void initPools() {
if (httpClient == null) {
cm = new PoolingHttpClientConnectionManager();
cm.setDefaultMaxPerRoute(count);
cm.setMaxTotal(totalCount);
httpClient = HttpClients.custom().setKeepAliveStrategy(defaultStrategy).setConnectionManager(cm).build();
}
}
/**
* HTTP connection keep-alive strategy
*/
public static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
int keepTime = Http_Default_Keep_Time * 1000;
while (it.hasNext()) {
HeaderElement headerElement = it.nextElement();
String param = headerElement.getName();
String value = headerElement.getValue();
if (value != null && param.equalsIgnoreCase("timeout")) {
try {
return Long.parseLong(value) * 1000;
} catch (Exception e) {
new Exception(
"format KeepAlive timeout exception, exception:" + e.toString())
.printStackTrace();
}
}
}
return keepTime;
};
public static CloseableHttpClient getHttpClient() {
return httpClient;
}
public static PoolingHttpClientConnectionManager getHttpConnectionManager() {
return cm;
}
/**
* execute an HTTP POST request
* defaults to Content-Type: application/json and Accept: application/json
*
* @param uri  request URL
* @param data request body
* @return responseBody
*/
public static String execute(String uri, String data) {
long startTime = System.currentTimeMillis();
HttpEntity httpEntity = null;
HttpEntityEnclosingRequestBase method = null;
String responseBody = "";
try {
if (httpClient == null) {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setEntity(new StringEntity(data));
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, "UTF-8");
}
} catch (Exception e) {
if (method != null) {
method.abort();
}
// e.printStackTrace();
// logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception("execute post request exception, url:"
+ uri + ", exception:" + e.toString() +
", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
// e.printStackTrace();
// logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception(
"close response exception, url:" + uri +
", exception:" + e.toString()
+ ", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
}
}
}
return responseBody;
}
/**
* create an HTTP request
*
* @param uri         request URL
* @param methodName  HTTP method name
* @param contentType content type
* @param timeout     request timeout
* @return HttpRequestBase
* @author lisc
*/
public static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
if (httpClient == null) {
initPools();
}
HttpRequestBase method;
if (timeout <= 0) {
timeout = DEFAULT_TIME_OUT;
}
RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
.setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
.setExpectContinueEnabled(false).build();
if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
method = new HttpPut(uri);
} else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
method = new HttpPost(uri);
} else if (HttpGet.METHOD_NAME.equalsIgnoreCase(methodName)) {
method = new HttpGet(uri);
} else {
method = new HttpPost(uri);
}
if (StringUtils.isBlank(contentType)) {
contentType = DEFAULT_CONTENT_TYPE;
}
method.addHeader("Content-Type", contentType);
method.addHeader("Accept", contentType);
method.setConfig(requestConfig);
return method;
}
/**
* execute an HTTP GET request
*
* @param uri request URL
* @return responseBody
*/
public static String execute(String uri) {
long startTime = System.currentTimeMillis();
HttpEntity httpEntity = null;
HttpRequestBase method = null;
String responseBody = "";
try {
if (httpClient == null) {
initPools();
}
method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, "UTF-8");
// logger.info("请求URL: " + uri + "+ 返回状态码:" + httpResponse.getStatusLine().getStatusCode());
}
} catch (Exception e) {
if (method != null) {
method.abort();
}
e.printStackTrace();
// logger.error("execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
// + (System.currentTimeMillis() - startTime));
System.out.println("log:调用 HttpClientPoolUtil execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
+ (System.currentTimeMillis() - startTime));
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
// e.printStackTrace();
// logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
// + ",cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception("close response exception, url:" + uri + ", exception:" + e.toString()
+ ",cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
}
}
}
return responseBody;
}
}
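A minimal usage sketch of the pooled HTTP helper above, mirroring how `RestfulDriver.connect()` and `RestfulStatement.executeQuery()` in this commit call it; the host, port, credentials and SQL text are placeholder assumptions (a taosd RESTful service on port 6041), and the class name `HttpClientPoolUtilSketch` is hypothetical:
```
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;

public class HttpClientPoolUtilSketch {
    public static void main(String[] args) {
        // GET: log in, the way RestfulDriver.connect() does
        String loginResult = HttpClientPoolUtil.execute("http://localhost:6041/rest/login/root/taosdata");
        JSONObject login = JSON.parseObject(loginResult);
        System.out.println("login status: " + login.getString("status"));

        // POST: run a SQL statement, the way RestfulStatement.executeQuery() does
        String queryResult = HttpClientPoolUtil.execute("http://localhost:6041/rest/sql", "show databases");
        System.out.println(queryResult);
    }
}
```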

View File

@ -5,14 +5,14 @@ import com.taosdata.jdbc.utils.TDNodes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class BaseTest {
public abstract class BaseTest {
private static boolean testCluster = false;
private static TDNodes nodes = new TDNodes();
@BeforeClass
public static void setupEnv() {
try{
try {
if (nodes.getTDNode(1).getTaosdPid() != null) {
System.out.println("Kill taosd before running JDBC test");
nodes.getTDNode(1).setRunning(1);

View File

@ -1,47 +1,210 @@
package com.taosdata.jdbc;
import org.junit.BeforeClass;
import org.junit.Test;
import java.sql.SQLException;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.sql.*;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.*;
public class TSDBDriverTest {
@Test
public void urlParserTest() throws SQLException {
TSDBDriver driver = new TSDBDriver();
String url = "jdbc:TSDB://127.0.0.1:0/db";
private static final String[] validURLs = {
"jdbc:TAOS://localhost:0",
"jdbc:TAOS://localhost",
"jdbc:TAOS://localhost:6030/test",
"jdbc:TAOS://localhost:6030",
"jdbc:TAOS://localhost:6030/",
"jdbc:TSDB://localhost:6030",
"jdbc:TSDB://localhost:6030/",
"jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata",
"jdbc:TAOS://:",
"jdbc:TAOS://:/",
"jdbc:TAOS://:/test",
"jdbc:TAOS://localhost:0/?user=root&password=taosdata"
};
private static boolean islibLoaded = false;
private static boolean isTaosdActived;
Properties properties = new Properties();
driver.parseURL(url, properties);
assertEquals(properties.get("host"), "127.0.0.1");
assertEquals(properties.get("port"), "0");
assertEquals(properties.get("dbname"), "db");
assertEquals(properties.get("user"), "root");
assertEquals(properties.get("password"), "your_password");
private Connection conn;
url = "jdbc:TSDB://127.0.0.1:0/log?charset=UTF-8";
properties = new Properties();
driver.parseURL(url, properties);
assertEquals(properties.get("host"), "127.0.0.1");
assertEquals(properties.get("port"), "0");
assertEquals(properties.get("dbname"), "log");
assertEquals(properties.get("charset"), "UTF-8");
@BeforeClass
public static void before() {
String osName = System.getProperty("os.name").toLowerCase();
if (!osName.equals("linux"))
return;
// try to load taos lib
try {
System.loadLibrary("taos");
islibLoaded = true;
} catch (UnsatisfiedLinkError error) {
System.out.println("load tdengine lib failed.");
error.printStackTrace();
}
// check taosd is activated
try {
String[] cmd = {"/bin/bash", "-c", "ps -ef | grep taosd | grep -v \"grep\""};
Process exec = Runtime.getRuntime().exec(cmd);
BufferedReader reader = new BufferedReader(new InputStreamReader(exec.getInputStream()));
int lineCnt = 0;
while (reader.readLine() != null) {
lineCnt++;
}
if (lineCnt > 0)
isTaosdActived = true;
} catch (IOException e) {
e.printStackTrace();
}
url = "jdbc:TSDB://127.0.0.1:0/";
properties = new Properties();
driver.parseURL(url, properties);
assertEquals(properties.get("host"), "127.0.0.1");
assertEquals(properties.get("port"), "0");
assertEquals(properties.get("dbname"), null);
url = "jdbc:TSDB://127.0.0.1:0/db";
properties = new Properties();
driver.parseURL(url, properties);
assertEquals(properties.get("host"), "127.0.0.1");
assertEquals(properties.get("port"), "0");
assertEquals(properties.get("dbname"), "db");
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
@Test
public void testConnectWithJdbcURL() {
final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
try {
if (islibLoaded && isTaosdActived) {
conn = DriverManager.getConnection(url);
assertNotNull("failure - connection should not be null", conn);
}
} catch (SQLException e) {
e.printStackTrace();
fail("failure - should not throw Exception");
}
}
@Test
public void testConnectWithProperties() {
final String jdbcUrl = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try {
if (islibLoaded && isTaosdActived) {
conn = DriverManager.getConnection(jdbcUrl, connProps);
assertNotNull("failure - connection should not be null", conn);
}
} catch (SQLException e) {
e.printStackTrace();
fail("failure - should not throw Exception");
}
}
@Test
public void testConnectWithConfigFile() {
String jdbcUrl = "jdbc:TAOS://:/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try {
if (islibLoaded && isTaosdActived) {
conn = DriverManager.getConnection(jdbcUrl, connProps);
assertNotNull("failure - connection should not be null", conn);
}
} catch (SQLException e) {
e.printStackTrace();
fail("failure - should not throw Exception");
}
}
@Test(expected = SQLException.class)
public void testAcceptsURL() throws SQLException {
Driver driver = new TSDBDriver();
for (String url : validURLs) {
assertTrue("failure - acceptsURL(\" " + url + " \") should be true", driver.acceptsURL(url));
}
driver.acceptsURL(null);
fail("acceptsURL throws exception when parameter is null");
}
@Test
public void testParseURL() {
TSDBDriver driver = new TSDBDriver();
String url = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata&charset=UTF-8";
Properties config = new Properties();
Properties actual = driver.parseURL(url, config);
assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", actual.get("host"));
assertEquals("failure - port should be 0", "0", actual.get("port"));
assertEquals("failure - dbname should be db", "db", actual.get("dbname"));
assertEquals("failure - user should be root", "root", actual.get("user"));
assertEquals("failure - password should be taosdata", "taosdata", actual.get("password"));
assertEquals("failure - charset should be UTF-8", "UTF-8", actual.get("charset"));
url = "jdbc:TAOS://127.0.0.1:0";
config = new Properties();
actual = driver.parseURL(url, config);
assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", actual.getProperty("host"));
assertEquals("failure - port should be 0", "0", actual.get("port"));
assertNull("failure - dbname should be null", actual.get("dbname"));
url = "jdbc:TAOS://127.0.0.1:0/db";
config = new Properties();
actual = driver.parseURL(url, config);
assertEquals("failure - host should be 127.0.0.1", "127.0.0.1", actual.getProperty("host"));
assertEquals("failure - port should be 0", "0", actual.get("port"));
assertEquals("failure - dbname should be db", "db", actual.get("dbname"));
url = "jdbc:TAOS://:/?";
config = new Properties();
config.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
config.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
actual = driver.parseURL(url, config);
assertEquals("failure - user should be root", "root", actual.getProperty("user"));
assertEquals("failure - password should be taosdata", "taosdata", actual.getProperty("password"));
assertNull("failure - host should be null", actual.getProperty("host"));
assertNull("failure - port should be null", actual.getProperty("port"));
assertNull("failure - dbname should be null", actual.getProperty("dbname"));
}
@Test
public void testGetPropertyInfo() throws SQLException {
Driver driver = new TSDBDriver();
final String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
Properties connProps = new Properties();
DriverPropertyInfo[] propertyInfo = driver.getPropertyInfo(url, connProps);
for (DriverPropertyInfo info : propertyInfo) {
if (info.name.equals(TSDBDriver.PROPERTY_KEY_HOST))
assertEquals("failure - host should be localhost", "localhost", info.value);
if (info.name.equals(TSDBDriver.PROPERTY_KEY_PORT))
assertEquals("failure - port should be 6030", "6030", info.value);
if (info.name.equals(TSDBDriver.PROPERTY_KEY_DBNAME))
assertEquals("failure - dbname should be test", "log", info.value);
if (info.name.equals(TSDBDriver.PROPERTY_KEY_USER))
assertEquals("failure - user should be root", "root", info.value);
if (info.name.equals(TSDBDriver.PROPERTY_KEY_PASSWORD))
assertEquals("failure - password should be root", "taosdata", info.value);
}
}
@Test
public void testGetMajorVersion() {
assertEquals("failure - getMajorVersion should be 2", 2, new TSDBDriver().getMajorVersion());
}
@Test
public void testGetMinorVersion() {
assertEquals("failure - getMinorVersion should be 0", 0, new TSDBDriver().getMinorVersion());
}
@Test
public void testJdbcCompliant() {
assertFalse("failure - jdbcCompliant should be false", new TSDBDriver().jdbcCompliant());
}
@Test
public void testGetParentLogger() throws SQLFeatureNotSupportedException {
assertNull("failure - getParentLogger should be be null", new TSDBDriver().getParentLogger());
}
}

View File

@ -0,0 +1,36 @@
package com.taosdata.jdbc.cases;
import org.junit.Test;
import java.sql.*;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.TimeUnit;
public class FailOverTest {
private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss");
@Test
public void testFailOver() throws ClassNotFoundException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
final String url = "jdbc:TAOS://:/?user=root&password=taosdata";
long end = System.currentTimeMillis() + 1000 * 60 * 5;
while (System.currentTimeMillis() < end) {
try (Connection conn = DriverManager.getConnection(url)) {
Statement stmt = conn.createStatement();
ResultSet resultSet = stmt.executeQuery("select server_status()");
resultSet.next();
int status = resultSet.getInt("server_status()");
System.out.println(">>>>>>>>>" + sdf.format(new Date()) + " status : " + status);
stmt.close();
TimeUnit.SECONDS.sleep(5);
} catch (SQLException | InterruptedException e) {
e.printStackTrace();
}
}
}
}

View File

@ -0,0 +1,40 @@
package com.taosdata.jdbc.rs;
import org.junit.Assert;
import org.junit.Test;
import java.sql.*;
public class RestfulDriverTest {
@Test
public void testCase001() {
try {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://master:6041/?user=root&password=taosdata");
Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery("select * from log.log");
ResultSetMetaData metaData = resultSet.getMetaData();
while (resultSet.next()) {
for (int i = 1; i <= metaData.getColumnCount(); i++) {
String column = metaData.getColumnLabel(i);
String value = resultSet.getString(i);
System.out.print(column + ":" + value + "\t");
}
System.out.println();
}
statement.close();
connection.close();
} catch (SQLException | ClassNotFoundException e) {
e.printStackTrace();
}
}
@Test
public void testAcceptUrl() throws SQLException {
Driver driver = new RestfulDriver();
boolean isAccept = driver.acceptsURL("jdbc:TAOS-RS://master:6041");
Assert.assertTrue(isAccept);
}
}

View File

@ -3,7 +3,6 @@ PROJECT(TDengine)
IF (TD_LINUX_64)
find_program(HAVE_ODBCINST NAMES odbcinst)
IF (HAVE_ODBCINST)
include(CheckSymbolExists)
# shall we revert CMAKE_REQUIRED_LIBRARIES and how?
@ -14,20 +13,43 @@ IF (TD_LINUX_64)
message(WARNING "unixodbc-dev is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc-dev")
else ()
message(STATUS "unixodbc/unixodbc-dev are installed, and odbc connector will be built")
AUX_SOURCE_DIRECTORY(src SRC)
# generate dynamic library (*.so)
ADD_LIBRARY(todbc SHARED ${SRC})
SET_TARGET_PROPERTIES(todbc PROPERTIES CLEAN_DIRECT_OUTPUT 1)
SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
TARGET_LINK_LIBRARIES(todbc taos)
install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/src/install.sh ${CMAKE_BINARY_DIR})")
ADD_SUBDIRECTORY(tests)
find_package(FLEX)
if(NOT FLEX_FOUND)
message(FATAL_ERROR "you need to install flex first")
else ()
if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0)
message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case")
else ()
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion")
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tools)
ADD_SUBDIRECTORY(tests)
endif ()
endif()
endif()
ELSE ()
message(WARNING "unixodbc is not installed yet, you may install it under ubuntu by typing: sudo apt install unixodbc")
ENDIF ()
ENDIF ()
IF (TD_WINDOWS_64)
find_package(ODBC)
if (NOT ODBC_FOUND)
message(FATAL_ERROR "you need to install ODBC first")
else ()
message(STATUS "ODBC_INCLUDE_DIRS: ${ODBC_INCLUDE_DIRS}")
message(STATUS "ODBC_LIBRARIES: ${ODBC_LIBRARIES}")
message(STATUS "ODBC_CONFIG: ${ODBC_CONFIG}")
endif ()
find_package(FLEX)
if(NOT FLEX_FOUND)
message(WARNING "you need to install flex first\n"
"you may go to: https://github.com/lexxmark/winflexbison\n"
"or download from: https://github.com/lexxmark/winflexbison/releases")
else ()
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tools)
ADD_SUBDIRECTORY(tests)
endif()
ENDIF ()

View File

@ -0,0 +1,88 @@
# ODBC Driver #
- **a very initial implementation of an ODBC driver for TAOS**
- **currently partially supported ODBC functions are:**
SQLAllocEnv
SQLFreeEnv
SQLAllocConnect
SQLFreeConnect
SQLConnect
SQLDisconnect
SQLAllocStmt
SQLAllocHandle
SQLFreeStmt
SQLExecDirect
SQLExecDirectW
SQLNumResultCols
SQLRowCount
SQLColAttribute
SQLGetData
SQLFetch
SQLPrepare
SQLExecute
SQLGetDiagField
SQLGetDiagRec
SQLBindParameter
SQLDriverConnect
SQLSetConnectAttr
SQLDescribeCol
SQLNumParams
SQLSetStmtAttr
ConfigDSN
- **internationalized:** you can specify a different charset/code page on the client side. For example, insert `utf-8.zh_cn` characters into a database on a Linux machine and query them back in a `gb2312/gb18030/...` code page on a Chinese Windows machine, or vice versa; you can even insert `gb2312/gb18030/...` characters into a database on a Linux box from a Japanese Windows box and query them back on your local Chinese Windows machine (see the connection-string sketch after this list).
- **enables ODBC-aware software to communicate with TAOS.**
- **enables any language with ODBC bindings/plugins to communicate with TAOS.**
- **still a work in progress...**
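A sketch of the internationalization point above, reusing the `tcodbc --dcs` command shown in the Test section later in this README; the `client_enc=GB18030` value is only an illustrative assumption for a Chinese Windows client:
```
.\debug\build\bin\tcodbc --dcs "Driver=TAOS;UID=<uid>;PWD=<pwd>;Server=<fqdn>:<port>;client_enc=GB18030" .\src\connector\odbc\tests\query_data.stmts
```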
# Building and Testing
**Note**: all work is done in TDengine's project directory
# Building under Linux (using Ubuntu as an example)
```
sudo apt install unixodbc unixodbc-dev flex
rm -rf debug && cmake -B debug && cmake --build debug && cmake --install debug && echo yes
```
# Building under Windows (using Windows 10 as an example)
- install a Windows `flex` port. We use [winflexbison](https://github.com/lexxmark/winflexbison) at the moment. Please note that you need to append `<path_to_win_flex.exe>` to your `PATH`.
- install Microsoft Visual Studio (VS2015 is used as the example here)
- `"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64`
- `rmdir /s /q debug`
- `cmake -G "NMake Makefiles" -B debug`
- `cmake --build debug`
- `cmake --install debug`
- open your `Command Prompt` with Administrator privileges
- remove any previously installed TAOS ODBC driver: run `C:\TDengine\todbcinst -u -f -n TAOS`
- install the TAOS ODBC driver that was just built: run `C:\TDengine\todbcinst -i -n TAOS -p C:\TDengine\driver`
- add a new user DSN: run `odbcconf CONFIGDSN TAOS "DSN=TAOS_DSN|Server=<fqdn>:<port>"`
# Test
We strongly suggest building on both Linux (Ubuntu) and Windows (Windows 10), because TAOS currently has its server-side port on the Linux platform only.
**Note1**: content within `<>` must be modified to match your environment
**Note2**: `.stmts` source files are all encoded in `UTF-8`
## start taosd on Linux, assuming the default charset is `UTF-8`
```
taosd -c ./debug/test/cfg
```
## create data on Linux
```
./debug/build/bin/tcodbc --dsn TAOS_DSN --uid <uid> --pwd <pwd> --sts ./src/connector/odbc/tests/create_data.stmts
--<or with driver connection string -->
./debug/build/bin/tcodbc --dcs 'Driver=TAOS;UID=<uid>;PWD=<pwd>;Server=<fqdn>:<port>;client_enc=UTF-8' ./src/connector/odbc/tests/create_data.stmts
```
## query data on Windows
```
.\debug\build\bin\tcodbc --dsn TAOS_DSN --uid <uid> --pwd <pwd> --sts .\src\connector\odbc\tests\query_data.stmts
--<or with driver connection string -->
.\debug\build\bin\tcodbc --dcs "Driver=TAOS;UID=<uid>;PWD=<pwd>;Server=<fqdn>:<port>;client_enc=UTF-8" .\src\connector\odbc\tests\query_data.stmts
```

View File

@ -0,0 +1,54 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX_64)
FLEX_TARGET(todbcFlexScanner
todbc_scanner.l
${CMAKE_CURRENT_BINARY_DIR}/todbc_scanner.c
)
set(todbc_flex_scanner_src
${FLEX_todbcFlexScanner_OUTPUTS}
)
AUX_SOURCE_DIRECTORY(. SRC)
# generate dynamic library (*.so)
ADD_LIBRARY(todbc SHARED ${SRC} ${todbc_flex_scanner_src})
SET_TARGET_PROPERTIES(todbc PROPERTIES CLEAN_DIRECT_OUTPUT 1)
SET_TARGET_PROPERTIES(todbc PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
TARGET_LINK_LIBRARIES(todbc taos odbcinst)
target_include_directories(todbc PUBLIC .)
install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/install.sh ${CMAKE_BINARY_DIR})")
ENDIF ()
IF (TD_WINDOWS_64)
FLEX_TARGET(todbcFlexScanner
todbc_scanner.l
${CMAKE_CURRENT_BINARY_DIR}/todbc_scanner.c
)
set(todbc_flex_scanner_src
${FLEX_todbcFlexScanner_OUTPUTS}
)
AUX_SOURCE_DIRECTORY(. SRC)
# generate dynamic library (*.dll)
ADD_LIBRARY(todbc SHARED
${SRC}
${todbc_flex_scanner_src}
${CMAKE_CURRENT_BINARY_DIR}/todbc.rc
todbc.def)
TARGET_LINK_LIBRARIES(todbc taos_static odbccp32 legacy_stdio_definitions)
target_include_directories(todbc PUBLIC .)
target_compile_definitions(todbc PRIVATE "todbc_EXPORT")
CONFIGURE_FILE("todbc.rc.in"
"${CMAKE_CURRENT_BINARY_DIR}/todbc.rc")
SET_TARGET_PROPERTIES(todbc PROPERTIES LINK_FLAGS
/DEF:todbc.def)
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /GL")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /GL")
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/todbc.dll DESTINATION driver)
ENDIF ()

View File

@ -9,16 +9,18 @@ rm -f "${BLD_DIR}/template.dsn"
cat > "${BLD_DIR}/template.ini" <<EOF
[TAOS]
Description = taos odbc driver
Driver = ${BLD_DIR}/build/lib/libtodbc.so
Description=taos odbc driver
Driver=${BLD_DIR}/build/lib/libtodbc.so
EOF
cat > "${BLD_DIR}/template.dsn" <<EOF
[TAOS_DSN]
Description=Connection to TAOS
Driver=TAOS
Server=localhost:6030
EOF
# better remove first ?
sudo odbcinst -i -d -f "${BLD_DIR}/template.ini" &&
odbcinst -i -s -f "${BLD_DIR}/template.dsn" &&
echo "odbc install done"

File diff suppressed because it is too large

View File

@ -0,0 +1,31 @@
EXPORTS
SQLAllocEnv
SQLFreeEnv
SQLAllocConnect
SQLFreeConnect
SQLConnect
SQLDisconnect
SQLAllocStmt
SQLAllocHandle
SQLFreeStmt
SQLExecDirect
SQLExecDirectW
SQLNumResultCols
SQLRowCount
SQLColAttribute
SQLGetData
SQLFetch
SQLPrepare
SQLExecute
SQLGetDiagField
SQLGetDiagRec
SQLBindParameter
SQLDriverConnect
SQLSetConnectAttr
SQLDescribeCol
SQLNumParams
SQLSetStmtAttr
ConfigDSN
ConfigTranslator
ConfigDriver

View File

@ -0,0 +1,31 @@
1 VERSIONINFO
FILEVERSION ${TD_VER_NUMBER}
PRODUCTVERSION ${TD_VER_NUMBER}
FILEFLAGSMASK 0x17L
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x4L
FILETYPE 0x0L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904b0"
BEGIN
VALUE "FileDescription", "ODBC Driver for TDengine"
VALUE "FileVersion", "${TD_VER_NUMBER}"
VALUE "InternalName", "todbc.dll(${TD_VER_CPUTYPE})"
VALUE "LegalCopyright", "Copyright (C) 2020 TAOS Data"
VALUE "OriginalFilename", ""
VALUE "ProductName", "todbc.dll(${TD_VER_CPUTYPE})"
VALUE "ProductVersion", "${TD_VER_NUMBER}"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1200
END
END

View File

@ -0,0 +1,5 @@
INSTALLDRIVER "TAOS ODBC|Driver=todbc.dll|FileUsage=0|ConnectFunctions=YYN"

View File

@ -0,0 +1,660 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "todbc_conv.h"
#include "todbc_log.h"
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
const char* tsdb_conv_code_str(TSDB_CONV_CODE code) {
switch (code) {
case TSDB_CONV_OK: return "TSDB_CONV_OK";
case TSDB_CONV_NOT_AVAIL: return "TSDB_CONV_NOT_AVAIL";
case TSDB_CONV_OOM: return "TSDB_CONV_OOM";
case TSDB_CONV_OOR: return "TSDB_CONV_OOR";
case TSDB_CONV_TRUNC_FRACTION: return "TSDB_CONV_TRUNC_FRACTION";
case TSDB_CONV_TRUNC: return "TSDB_CONV_TRUNC";
case TSDB_CONV_CHAR_NOT_NUM: return "TSDB_CONV_CHAR_NOT_NUM";
case TSDB_CONV_CHAR_NOT_TS: return "TSDB_CONV_CHAR_NOT_TS";
case TSDB_CONV_NOT_VALID_TS: return "TSDB_CONV_NOT_VALID_TS";
case TSDB_CONV_GENERAL: return "TSDB_CONV_GENERAL";
case TSDB_CONV_SRC_TOO_LARGE: return "TSDB_CONV_SRC_TOO_LARGE";
case TSDB_CONV_SRC_BAD_SEQ: return "TSDB_CONV_SRC_BAD_SEQ";
case TSDB_CONV_SRC_INCOMPLETE: return "TSDB_CONV_SRC_INCOMPLETE";
case TSDB_CONV_SRC_GENERAL: return "TSDB_CONV_SRC_GENERAL";
case TSDB_CONV_BAD_CHAR: return "TSDB_CONV_BAD_CHAR";
default: return "UNKNOWN";
};
}
// src: int
TSDB_CONV_CODE tsdb_int64_to_bit(int64_t src, int8_t *dst) {
*dst = (int8_t)src;
if (src==0 || src==1) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_tinyint(int64_t src, int8_t *dst) {
*dst = (int8_t)src;
if (src == *dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_smallint(int64_t src, int16_t *dst) {
*dst = (int16_t)src;
if (src == *dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_int(int64_t src, int32_t *dst) {
*dst = (int32_t)src;
if (src == *dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_bigint(int64_t src, int64_t *dst) {
*dst = src;
return TSDB_CONV_OK;
}
TSDB_CONV_CODE tsdb_int64_to_ts(int64_t src, int64_t *dst) {
*dst = src;
time_t t = (time_t)(src / 1000);
struct tm tm = {0};
if (localtime_r(&t, &tm)) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_float(int64_t src, float *dst) {
*dst = (float)src;
int64_t v = (int64_t)*dst;
if (v==src) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_double(int64_t src, double *dst) {
*dst = (double)src;
int64_t v = (int64_t)*dst;
if (v==src) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_int64_to_char(int64_t src, char *dst, size_t dlen) {
int n = snprintf(dst, dlen, "%" PRId64 "", src);
DASSERT(n>=0);
if (n<dlen) return TSDB_CONV_OK;
return TSDB_CONV_TRUNC;
}
// src: double
TSDB_CONV_CODE tsdb_double_to_bit(double src, int8_t *dst) {
*dst = (int8_t)src;
if (src<0 || src>=2) return TSDB_CONV_OOR;
if (src == *dst) return TSDB_CONV_OK;
int64_t v = (int64_t)src;
if (v == *dst) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
TSDB_CONV_CODE tsdb_double_to_tinyint(double src, int8_t *dst) {
*dst = (int8_t)src;
if (src<SCHAR_MIN || src>SCHAR_MAX) return TSDB_CONV_OOR;
if (src == *dst) return TSDB_CONV_OK;
int64_t v = (int64_t)src;
if (v == *dst) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
TSDB_CONV_CODE tsdb_double_to_smallint(double src, int16_t *dst) {
*dst = (int16_t)src;
if (src<SHRT_MIN || src>SHRT_MAX) return TSDB_CONV_OOR;
if (src == *dst) return TSDB_CONV_OK;
int64_t v = (int64_t)src;
if (v == *dst) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
TSDB_CONV_CODE tsdb_double_to_int(double src, int32_t *dst) {
*dst = (int32_t)src;
if (src<LONG_MIN || src>LONG_MAX) return TSDB_CONV_OOR;
if (src == *dst) return TSDB_CONV_OK;
int64_t v = (int64_t)src;
if (v == *dst) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
TSDB_CONV_CODE tsdb_double_to_bigint(double src, int64_t *dst) {
*dst = (int64_t)src;
if (src<LLONG_MIN || src>LLONG_MAX) return TSDB_CONV_OOR;
if (src == *dst) return TSDB_CONV_OK;
int64_t v = (int64_t)src;
if (v == *dst) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
TSDB_CONV_CODE tsdb_double_to_ts(double src, int64_t *dst) {
TSDB_CONV_CODE code = tsdb_double_to_bigint(src, dst);
if (code==TSDB_CONV_OK || code==TSDB_CONV_TRUNC_FRACTION) {
int64_t v = (int64_t)src;
time_t t = (time_t)(v / 1000);
struct tm tm = {0};
if (localtime_r(&t, &tm)) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
return code;
}
TSDB_CONV_CODE tsdb_double_to_char(double src, char *dst, size_t dlen) {
int n = snprintf(dst, dlen, "%lg", src);
DASSERT(n>=0);
if (n<dlen) return TSDB_CONV_OK;
return TSDB_CONV_TRUNC;
}
// src: SQL_TIMESTAMP_STRUCT
TSDB_CONV_CODE tsdb_timestamp_to_char(SQL_TIMESTAMP_STRUCT src, char *dst, size_t dlen) {
int n = snprintf(dst, dlen, "%04d-%02d-%02d %02d:%02d:%02d.%03d",
src.year, src.month, src.day,
src.hour, src.minute, src.second,
src.fraction / 1000000);
DASSERT(n>=0);
if (n<dlen) return TSDB_CONV_OK;
if (strlen(dst)>=19) return TSDB_CONV_TRUNC_FRACTION;
return TSDB_CONV_TRUNC;
}
// src: chars
TSDB_CONV_CODE tsdb_chars_to_bit(const char *src, size_t smax, int8_t *dst) {
if (strcmp(src, "0")==0) {
*dst = 0;
return TSDB_CONV_OK;
}
if (strcmp(src, "1")==0) {
*dst = 1;
return TSDB_CONV_OK;
}
double v;
int bytes;
int n = sscanf(src, "%lg%n", &v, &bytes);
if (n!=1) return TSDB_CONV_CHAR_NOT_NUM;
if (bytes!=strlen(src)) return TSDB_CONV_CHAR_NOT_NUM;
if (v<0 || v>=2) return TSDB_CONV_OOR;
return TSDB_CONV_TRUNC_FRACTION;
}
TSDB_CONV_CODE tsdb_chars_to_tinyint(const char *src, size_t smax, int8_t *dst) {
int64_t v;
TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v);
if (code!=TSDB_CONV_OK) return code;
*dst = (int8_t)v;
if (v==*dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_chars_to_smallint(const char *src, size_t smax, int16_t *dst) {
int64_t v;
TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v);
if (code!=TSDB_CONV_OK) return code;
*dst = (int16_t)v;
if (v==*dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_chars_to_int(const char *src, size_t smax, int32_t *dst) {
int64_t v;
TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v);
if (code!=TSDB_CONV_OK) return code;
*dst = (int32_t)v;
if (v==*dst) return TSDB_CONV_OK;
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_chars_to_bigint(const char *src, size_t smax, int64_t *dst) {
int bytes;
int n = sscanf(src, "%" PRId64 "%n", dst, &bytes);
if (n!=1) return TSDB_CONV_CHAR_NOT_NUM;
if (bytes==strlen(src)) {
return TSDB_CONV_OK;
}
double v;
n = sscanf(src, "%lg%n", &v, &bytes);
if (n!=1) return TSDB_CONV_CHAR_NOT_NUM;
if (bytes==strlen(src)) {
return TSDB_CONV_TRUNC_FRACTION;
}
return TSDB_CONV_OK;
}
TSDB_CONV_CODE tsdb_chars_to_ts(const char *src, size_t smax, int64_t *dst) {
int64_t v;
TSDB_CONV_CODE code = tsdb_chars_to_bigint(src, smax, &v);
if (code!=TSDB_CONV_OK) return code;
*dst = v;
if (v==*dst) {
time_t t = (time_t)(v / 1000);
struct tm tm = {0};
if (localtime_r(&t, &tm)) return TSDB_CONV_OK;
}
return TSDB_CONV_OOR;
}
TSDB_CONV_CODE tsdb_chars_to_float(const char *src, size_t smax, float *dst) {
int bytes;
int n = sscanf(src, "%g%n", dst, &bytes);
if (n==1 && bytes==strlen(src)) {
return TSDB_CONV_OK;
}
return TSDB_CONV_CHAR_NOT_NUM;
}
TSDB_CONV_CODE tsdb_chars_to_double(const char *src, size_t smax, double *dst) {
int bytes;
int n = sscanf(src, "%lg%n", dst, &bytes);
if (n==1 && bytes==strlen(src)) {
return TSDB_CONV_OK;
}
return TSDB_CONV_CHAR_NOT_NUM;
}
TSDB_CONV_CODE tsdb_chars_to_timestamp(const char *src, size_t smax, SQL_TIMESTAMP_STRUCT *dst) {
int64_t v = 0;
// why cast to 'char*' ?
int r = taosParseTime((char*)src, &v, (int32_t)smax, TSDB_TIME_PRECISION_MILLI, 0);
if (r) {
return TSDB_CONV_CHAR_NOT_TS;
}
time_t t = v/1000;
struct tm vtm = {0};
localtime_r(&t, &vtm);
dst->year = (SQLSMALLINT)(vtm.tm_year + 1900);
dst->month = (SQLUSMALLINT)(vtm.tm_mon + 1);
dst->day = (SQLUSMALLINT)(vtm.tm_mday);
dst->hour = (SQLUSMALLINT)(vtm.tm_hour);
dst->minute = (SQLUSMALLINT)(vtm.tm_min);
dst->second = (SQLUSMALLINT)(vtm.tm_sec);
dst->fraction = (SQLUINTEGER)(v%1000 * 1000000);
return TSDB_CONV_OK;
}
TSDB_CONV_CODE tsdb_chars_to_timestamp_ts(const char *src, size_t smax, int64_t *dst) {
// why cast to 'char*' ?
int r = taosParseTime((char*)src, dst, (int32_t)smax, TSDB_TIME_PRECISION_MILLI, 0);
if (r) {
return TSDB_CONV_CHAR_NOT_TS;
}
return TSDB_CONV_OK;
}
TSDB_CONV_CODE tsdb_chars_to_char(const char *src, size_t smax, char *dst, size_t dmax) {
int n = snprintf(dst, dmax, "%s", src);
DASSERT(n>=0);
if (n<dmax) return TSDB_CONV_OK;
return TSDB_CONV_TRUNC;
}
char* stack_buffer_alloc(stack_buffer_t *buffer, size_t bytes) {
if (!buffer) return NULL;
// align-by-size_of-size_t-bytes
if (bytes==0) bytes = sizeof(size_t);
bytes = (bytes + sizeof(size_t) - 1) / sizeof(size_t) * sizeof(size_t);
size_t next = buffer->next + bytes;
if (next>sizeof(buffer->buf)) return NULL;
char *p = buffer->buf + buffer->next;
buffer->next = next;
return p;
}
int is_owned_by_stack_buffer(stack_buffer_t *buffer, const char *ptr) {
if (!buffer) return 0;
if (ptr>=buffer->buf && ptr<buffer->buf+buffer->next) return 1;
return 0;
}
struct tsdb_conv_s {
iconv_t cnv;
unsigned int direct:1;
};
static tsdb_conv_t no_conversion = {0};
static pthread_once_t once = PTHREAD_ONCE_INIT;
static void once_init(void) {
no_conversion.cnv = (iconv_t)-1;
no_conversion.direct = 1;
}
tsdb_conv_t* tsdb_conv_direct() { // get a non-conversion-converter
pthread_once(&once, once_init);
return &no_conversion;
}
tsdb_conv_t* tsdb_conv_open(const char *from_enc, const char *to_enc) {
pthread_once(&once, once_init);
tsdb_conv_t *cnv = (tsdb_conv_t*)calloc(1, sizeof(*cnv));
if (!cnv) return NULL;
if (strcmp(from_enc, to_enc)==0 && 0) {
cnv->cnv = (iconv_t)-1;
cnv->direct = 1;
return cnv;
}
cnv->cnv = iconv_open(to_enc, from_enc);
if (cnv->cnv == (iconv_t)-1) {
free(cnv);
return NULL;
}
cnv->direct = 0;
return cnv;
}
void tsdb_conv_close(tsdb_conv_t *cnv) {
if (!cnv) return;
if (cnv == &no_conversion) return;
if (!cnv->direct) {
if (cnv->cnv != (iconv_t)-1) {
iconv_close(cnv->cnv);
}
}
cnv->cnv = (iconv_t)-1;
cnv->direct = 0;
free(cnv);
}
TSDB_CONV_CODE tsdb_conv_write(tsdb_conv_t *cnv, const char *src, size_t *slen, char *dst, size_t *dlen) {
if (!cnv) return TSDB_CONV_NOT_AVAIL;
if (cnv->direct) {
size_t n = (*slen > *dlen) ? *dlen : *slen;
memcpy(dst, src, n);
*slen -= n;
*dlen -= n;
if (*dlen) dst[n] = '\0';
return TSDB_CONV_OK;
}
if (!cnv->cnv) return TSDB_CONV_NOT_AVAIL;
size_t r = iconv(cnv->cnv, (char**)&src, slen, &dst, dlen);
if (r==(size_t)-1) return TSDB_CONV_BAD_CHAR;
if (*slen) return TSDB_CONV_TRUNC;
if (*dlen) *dst = '\0';
return TSDB_CONV_OK;
}
TSDB_CONV_CODE tsdb_conv_write_int64(tsdb_conv_t *cnv, int64_t val, char *dst, size_t *dlen) {
char utf8[64];
int n = snprintf(utf8, sizeof(utf8), "%" PRId64 "", val);
DASSERT(n>=0);
DASSERT(n<sizeof(utf8));
size_t len = (size_t)n;
TSDB_CONV_CODE code = tsdb_conv_write(cnv, utf8, &len, dst, dlen);
*dlen = (size_t)n+1;
return code;
}
TSDB_CONV_CODE tsdb_conv_write_double(tsdb_conv_t *cnv, double val, char *dst, size_t *dlen) {
char utf8[256];
int n = snprintf(utf8, sizeof(utf8), "%g", val);
DASSERT(n>=0);
DASSERT(n<sizeof(utf8));
size_t len = (size_t)n;
TSDB_CONV_CODE code = tsdb_conv_write(cnv, utf8, &len, dst, dlen);
*dlen = (size_t)n+1;
return code;
}
TSDB_CONV_CODE tsdb_conv_write_timestamp(tsdb_conv_t *cnv, SQL_TIMESTAMP_STRUCT val, char *dst, size_t *dlen) {
char utf8[256];
int n = snprintf(utf8, sizeof(utf8), "%04d-%02d-%02d %02d:%02d:%02d.%03d",
val.year, val.month, val.day,
val.hour, val.minute, val.second,
val.fraction / 1000000);
DASSERT(n>=0);
DASSERT(n<sizeof(utf8));
size_t len = (size_t)n;
TSDB_CONV_CODE code = tsdb_conv_write(cnv, utf8, &len, dst, dlen);
*dlen = (size_t)n+1;
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_bit(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_bit(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_tinyint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_tinyint(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_smallint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int16_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_smallint(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_int(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int32_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_int(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_bigint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_bigint(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_ts(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_float(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, float *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_float(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_double(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, double *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_double(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_timestamp(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, SQL_TIMESTAMP_STRUCT *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_timestamp(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv_chars_to_timestamp_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst) {
const char *utf8 = NULL;
TSDB_CONV_CODE code = tsdb_conv(cnv, buffer, src, slen, &utf8, NULL);
if (code) return code;
code = tsdb_chars_to_timestamp_ts(utf8, sizeof(utf8), dst);
tsdb_conv_free(cnv, utf8, buffer, src);
return code;
}
TSDB_CONV_CODE tsdb_conv(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, const char **dst, size_t *dlen) {
  if (!cnv) return TSDB_CONV_NOT_AVAIL;
  char *buf;
  size_t blen;
  if (cnv->direct) {
    // pass-through converter: if the source is already NUL-terminated it can be
    // returned as-is; note this reads src[slen], one byte past the given length,
    // which is only safe when the caller guarantees that byte is readable
    if (src[slen]=='\0') {
      *dst = src;
      if (dlen) *dlen = slen;
      return TSDB_CONV_OK;
    }
    blen = slen + 1;            // just enough room to copy and NUL-terminate
  } else {
    blen = (slen + 1) * 4;      // worst case: each input byte may expand to 4 output bytes
  }
  // prefer the caller-provided stack buffer; fall back to the heap when it is exhausted
  buf = stack_buffer_alloc(buffer, blen);
  if (!buf) {
    buf = (char*)malloc(blen);
    if (!buf) return TSDB_CONV_OOM;
  }
  if (cnv->direct) {
    size_t n = slen;
    DASSERT(blen > n);
    memcpy(buf, src, n);
    buf[n] = '\0';
    *dst = buf;
    if (dlen) *dlen = n;
    return TSDB_CONV_OK;
  }
  const char *orig_s    = src;
  char       *orig_d    = buf;
  size_t      orig_blen = blen;
  TSDB_CONV_CODE code;
  size_t r = iconv(cnv->cnv, (char**)&src, &slen, &buf, &blen);
  do {
    if (r==(size_t)-1) {
      // map iconv's errno values onto the converter's own error codes
      switch (errno) {
        case E2BIG:  code = TSDB_CONV_SRC_TOO_LARGE;  break;
        case EILSEQ: code = TSDB_CONV_SRC_BAD_SEQ;    break;
        case EINVAL: code = TSDB_CONV_SRC_INCOMPLETE; break;
        default:     code = TSDB_CONV_SRC_GENERAL;    break;
      }
      break;
    }
    if (slen) {
      // iconv reported success but did not consume the whole input
      code = TSDB_CONV_TRUNC;
      break;
    }
    DASSERT(blen);
    *buf = '\0';
    *dst = orig_d;
    if (dlen) *dlen = orig_blen - blen;   // converted length, excluding the terminating NUL
    return TSDB_CONV_OK;
  } while (0);
  // error path: release the output buffer unless it came from the stack buffer
  if (orig_d!=(char*)orig_s && !is_owned_by_stack_buffer(buffer, orig_d)) free(orig_d);
  return code;
}
void tsdb_conv_free(tsdb_conv_t *cnv, const char *ptr, stack_buffer_t *buffer, const char *src) {
if (ptr!=src && !is_owned_by_stack_buffer(buffer, ptr)) free((char*)ptr);
}
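The wrappers above all follow the same flow: tsdb_conv() re-encodes the source bytes into a NUL-terminated UTF-8 string (borrowing space from the caller's stack_buffer_t and falling back to the heap), the matching tsdb_chars_to_*() parser converts that string into the target C type, and tsdb_conv_free() releases the intermediate buffer. A minimal caller-side sketch of that flow; the encoding names and sample value are illustrative, and the zero-initialization of stack_buffer_t is an assumption, not something stated in this diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "todbc_conv.h"                     /* declarations shown in the header below */

static int convert_int_field_example(const char *src) {
  /* open a converter from the (assumed) client encoding to UTF-8 */
  tsdb_conv_t *cnv = tsdb_conv_open("GB18030", "UTF-8");
  if (!cnv) return -1;

  stack_buffer_t buffer = {0};              /* assumed: a zeroed buffer means "empty" */

  int32_t value = 0;
  TSDB_CONV_CODE code = tsdb_conv_chars_to_int(cnv, &buffer, src, strlen(src), &value);
  if (code == TSDB_CONV_OK) {
    printf("parsed %d\n", value);
  } else {
    fprintf(stderr, "conversion failed: %s\n", tsdb_conv_code_str(code));
  }

  tsdb_conv_close(cnv);
  return code == TSDB_CONV_OK ? 0 : -1;
}

Note that the chars_to_* wrappers call tsdb_conv_free() themselves, so a caller only has to manage the converter and the scratch buffer.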

todbc_conv.h
View File

@ -0,0 +1,109 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _todbc_conv_h_
#define _todbc_conv_h_
#include "os.h"
#include <iconv.h>
#include <sql.h>
typedef enum {
TSDB_CONV_OK = 0,
TSDB_CONV_NOT_AVAIL,
TSDB_CONV_OOM,
TSDB_CONV_OOR,
TSDB_CONV_TRUNC_FRACTION,
TSDB_CONV_TRUNC,
TSDB_CONV_CHAR_NOT_NUM,
TSDB_CONV_CHAR_NOT_TS,
TSDB_CONV_NOT_VALID_TS,
TSDB_CONV_GENERAL,
TSDB_CONV_BAD_CHAR,
TSDB_CONV_SRC_TOO_LARGE,
TSDB_CONV_SRC_BAD_SEQ,
TSDB_CONV_SRC_INCOMPLETE,
TSDB_CONV_SRC_GENERAL,
} TSDB_CONV_CODE;
const char* tsdb_conv_code_str(TSDB_CONV_CODE code);
typedef struct stack_buffer_s stack_buffer_t;
struct stack_buffer_s {
char buf[1024*16];
size_t next;
};
char* stack_buffer_alloc(stack_buffer_t *buffer, size_t bytes);
int is_owned_by_stack_buffer(stack_buffer_t *buffer, const char *ptr);
typedef struct tsdb_conv_s tsdb_conv_t;
tsdb_conv_t* tsdb_conv_direct(); // get a pass-through converter that performs no encoding conversion
tsdb_conv_t* tsdb_conv_open(const char *from_enc, const char *to_enc);
void tsdb_conv_close(tsdb_conv_t *cnv);
TSDB_CONV_CODE tsdb_conv_write(tsdb_conv_t *cnv, const char *src, size_t *slen, char *dst, size_t *dlen);
TSDB_CONV_CODE tsdb_conv_write_int64(tsdb_conv_t *cnv, int64_t val, char *dst, size_t *dlen);
TSDB_CONV_CODE tsdb_conv_write_double(tsdb_conv_t *cnv, double val, char *dst, size_t *dlen);
TSDB_CONV_CODE tsdb_conv_write_timestamp(tsdb_conv_t *cnv, SQL_TIMESTAMP_STRUCT val, char *dst, size_t *dlen);
TSDB_CONV_CODE tsdb_conv_chars_to_bit(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_tinyint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int8_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_smallint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int16_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_int(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int32_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_bigint(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_float(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, float *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_double(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, double *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_timestamp(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, SQL_TIMESTAMP_STRUCT *dst);
TSDB_CONV_CODE tsdb_conv_chars_to_timestamp_ts(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, int64_t *dst);
TSDB_CONV_CODE tsdb_conv(tsdb_conv_t *cnv, stack_buffer_t *buffer, const char *src, size_t slen, const char **dst, size_t *dlen);
void tsdb_conv_free(tsdb_conv_t *cnv, const char *ptr, stack_buffer_t *buffer, const char *src);
TSDB_CONV_CODE tsdb_int64_to_bit(int64_t src, int8_t *dst);
TSDB_CONV_CODE tsdb_int64_to_tinyint(int64_t src, int8_t *dst);
TSDB_CONV_CODE tsdb_int64_to_smallint(int64_t src, int16_t *dst);
TSDB_CONV_CODE tsdb_int64_to_int(int64_t src, int32_t *dst);
TSDB_CONV_CODE tsdb_int64_to_bigint(int64_t src, int64_t *dst);
TSDB_CONV_CODE tsdb_int64_to_ts(int64_t src, int64_t *dst);
TSDB_CONV_CODE tsdb_int64_to_float(int64_t src, float *dst);
TSDB_CONV_CODE tsdb_int64_to_double(int64_t src, double *dst);
TSDB_CONV_CODE tsdb_int64_to_char(int64_t src, char *dst, size_t dlen);
TSDB_CONV_CODE tsdb_double_to_bit(double src, int8_t *dst);
TSDB_CONV_CODE tsdb_double_to_tinyint(double src, int8_t *dst);
TSDB_CONV_CODE tsdb_double_to_smallint(double src, int16_t *dst);
TSDB_CONV_CODE tsdb_double_to_int(double src, int32_t *dst);
TSDB_CONV_CODE tsdb_double_to_bigint(double src, int64_t *dst);
TSDB_CONV_CODE tsdb_double_to_ts(double src, int64_t *dst);
TSDB_CONV_CODE tsdb_double_to_char(double src, char *dst, size_t dlen);
TSDB_CONV_CODE tsdb_timestamp_to_char(SQL_TIMESTAMP_STRUCT src, char *dst, size_t dlen);
TSDB_CONV_CODE tsdb_chars_to_bit(const char *src, size_t smax, int8_t *dst);
TSDB_CONV_CODE tsdb_chars_to_tinyint(const char *src, size_t smax, int8_t *dst);
TSDB_CONV_CODE tsdb_chars_to_smallint(const char *src, size_t smax, int16_t *dst);
TSDB_CONV_CODE tsdb_chars_to_int(const char *src, size_t smax, int32_t *dst);
TSDB_CONV_CODE tsdb_chars_to_bigint(const char *src, size_t smax, int64_t *dst);
TSDB_CONV_CODE tsdb_chars_to_ts(const char *src, size_t smax, int64_t *dst);
TSDB_CONV_CODE tsdb_chars_to_float(const char *src, size_t smax, float *dst);
TSDB_CONV_CODE tsdb_chars_to_double(const char *src, size_t smax, double *dst);
TSDB_CONV_CODE tsdb_chars_to_timestamp(const char *src, size_t smax, SQL_TIMESTAMP_STRUCT *dst);
TSDB_CONV_CODE tsdb_chars_to_char(const char *src, size_t smax, char *dst, size_t dmax);
#endif // _todbc_conv_h_
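The tsdb_int64_to_*() and tsdb_double_to_*() families declared above narrow a wide value into a specific column type and are expected to signal TSDB_CONV_OOR when the value does not fit and TSDB_CONV_TRUNC_FRACTION when a fractional part is lost. A sketch of what one such range-checked conversion might look like; this illustrates the contract implied by the error codes, it is not the implementation from this commit:

#include <stdint.h>
#include "todbc_conv.h"                       /* for TSDB_CONV_CODE */

/* hypothetical sketch of the tsdb_int64_to_tinyint() contract */
static TSDB_CONV_CODE int64_to_tinyint_sketch(int64_t src, int8_t *dst) {
  if (src < INT8_MIN || src > INT8_MAX) return TSDB_CONV_OOR;   /* out of range for the target type */
  *dst = (int8_t)src;
  return TSDB_CONV_OK;
}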

todbc_flex.h
View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TODBC_FLEX_H_
#define _TODBC_FLEX_H_
typedef struct conn_val_s conn_val_t;
struct conn_val_s {
char *key;
char *dsn;
char *uid;
char *pwd;
char *db;
char *server;
char *svr_enc;
char *cli_enc;
};
void conn_val_reset(conn_val_t *val);
int todbc_parse_conn_string(const char *conn, conn_val_t *val);
#endif // _TODBC_FLEX_H_
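todbc_parse_conn_string() is the counterpart of this struct: it splits an ODBC connection string into the conn_val_t fields, and conn_val_reset() releases whatever the parser allocated. A usage sketch; the key names in the sample string, the zero-initialization, and the 0-on-success return convention are assumptions inferred from the struct fields, not guaranteed by this header:

#include <stdio.h>
#include <string.h>
#include "todbc_flex.h"

int main(void) {
  /* illustrative connection string; accepted keys are inferred from conn_val_s */
  const char *conn = "DSN=TAOS_DSN;UID=root;PWD=taosdata;Server=localhost:6030";

  conn_val_t val;
  memset(&val, 0, sizeof(val));               /* assumed: parser expects a zeroed struct */

  if (todbc_parse_conn_string(conn, &val) == 0) {   /* assumed: returns 0 on success */
    printf("dsn=%s uid=%s server=%s\n",
           val.dsn ? val.dsn : "(null)",
           val.uid ? val.uid : "(null)",
           val.server ? val.server : "(null)");
  }

  conn_val_reset(&val);                       /* frees any fields the parser allocated */
  return 0;
}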

Some files were not shown because too many files have changed in this diff.