Merge branch 'develop' into xiaoping/add_test_case

Commit 11837506d2 by Ping Xiao, 2020-12-05 18:12:08 +08:00
624 changed files with 52,374 additions and 19,447 deletions

.gitmodules (vendored, 6 lines changed)

@ -4,3 +4,9 @@
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git


@ -32,6 +32,8 @@ matrix:
- python3-setuptools
- valgrind
- psmisc
- unixodbc
- unixodbc-dev
before_script:
- export TZ=Asia/Harbin

Jenkinsfile (vendored, 190 lines changed)

@ -4,19 +4,22 @@ pipeline {
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC= '/var/lib/jenkins/workspace/TDinternal/community'
}
stages {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label 'master'}
agent{label '184'}
steps {
sh '''
date
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
@ -26,6 +29,7 @@ pipeline {
cd debug
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
#./test-all.sh smoke
./test-all.sh pytest
@ -33,15 +37,17 @@ pipeline {
}
}
stage('test_b1') {
agent{label '184'}
agent{label 'master'}
steps {
sh '''
date
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
@ -62,12 +68,14 @@ pipeline {
agent{label "185"}
steps {
sh '''
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
@ -78,7 +86,20 @@ pipeline {
cmake .. > /dev/null
make > /dev/null
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./crash_gen.sh -a -p -t 4 -s 2000
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WKC}/tests/pytest
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2
@ -89,14 +110,17 @@ pipeline {
stage('test_valgrind') {
agent{label "186"}
steps {
sh '''
date
cd ${WKC}
git reset --hard
git checkout develop
git pull
git submodule update
cd ${WK}
git reset --hard
git checkout develop
git pull
export TZ=Asia/Harbin
@ -116,10 +140,162 @@ pipeline {
date'''
}
}
stage('connector'){
agent{label "release"}
steps{
sh'''
cd ${WORKSPACE}
git checkout develop
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/gotest
bash batchtest.sh
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker
python3 PythonChecker.py
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/
mvn clean package assembly:single >/dev/null
java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
'''
}
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
dotnet run
'''
}
}
}
stage('arm64_build'){
agent{label 'arm64'}
steps{
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
stage('arm32_build'){
agent{label 'arm32'}
steps{
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
sh '''
cd ${WK}
git fetch
git checkout develop
git pull
cd ${WKC}
git fetch
git checkout develop
git pull
git submodule update
cd ${WKC}/packaging
./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
'''
}
}
}
}
}
}
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
}
}


@ -61,7 +61,7 @@ The use of each configuration item is:
* **port**: This is the `http` service port which enables other applications to manage rules via the `restful API`.
* **database**: rules are stored in a `sqlite` database; this is the path of the database file (if the file does not exist, the alert application creates it automatically).
* **tdengine**: connection string of the `TDEngine` server; note that in most cases the database information should be put in a rule, thus it should NOT be included here.
* **tdengine**: connection string of the `TDEngine` server (please refer to the documentation of the GO connector for the detailed format of this string); note that the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string.
* **log > level**: log level, could be `production` or `debug`.
* **log > path**: log output file path.
* **receivers > alertManager**: the alert application pushes alerts to `AlertManager` at this URL.


@ -58,7 +58,7 @@ $ go build
* **port**: the alert monitoring program supports rule management through a `restful API`; this parameter sets the listening port of the `http` service.
* **database**: the alert monitoring program stores its rules in a `sqlite` database; this parameter is the path of the database file (there is no need to create the file in advance; if it does not exist, the program creates it automatically).
* **tdengine**: connection information of `TDEngine`; in general the database information should be specified in the alert rule, so it should **not** be included here.
* **tdengine**: connection string of `TDEngine` (see the documentation of the GO connector for the detailed format of this string); in general the database name should be specified in the `sql` statement of the alert rule, so the string should **not** contain the database name.
* **log > level**: logging level, either `production` or `debug`.
* **log > path**: path of the log file.
* **receivers > alertManager**: the alert monitoring program pushes alerts to `AlertManager`; specify the receiving address of `AlertManager` here.


@ -84,6 +84,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
case firing && (alert.State == AlertStateWaiting):
alert.StartsAt = time.Now()
alert.EndsAt = time.Time{}
if rule.For.Nanoseconds() > 0 {
alert.State = AlertStatePending
return false
@ -95,6 +96,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
return false
}
alert.StartsAt = alert.StartsAt.Add(rule.For.Duration)
alert.EndsAt = time.Time{}
alert.State = AlertStateFiring
case firing && (alert.State == AlertStateFiring):


@ -13,6 +13,7 @@ ELSEIF (TD_WINDOWS)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/C\# DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
@ -31,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.14-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")


@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.5.1")
SET(TD_VER_NUMBER "2.0.8.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -51,8 +51,8 @@ INTERVAL(1M)</code></pre>
<li><p>mseconds: the interval, in milliseconds, at which database updates are polled, usually set to 1000 ms. The return value is a pointer to a TDengine_SUB structure; NULL indicates failure.</p></li>
</ul><li><p><code>TAOS_ROW taos_consume(TAOS_SUB *tsub)</code>
</p><p>This function retrieves the subscription result and is usually placed inside an infinite loop in the application. When a new record arrives in the database, the API returns that latest record; when there is no new record, the API blocks. A NULL return value indicates a system error. Parameter:</p></li><ul><li><p>tsub: the structure pointer returned by taos_subscribe.</p></li></ul><li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code></p><p>Cancels the subscription. The application must call this function before exiting to avoid resource leaks.</p></li>
<li><p><code>int taos_num_subfields(TAOS_SUB *tsub)</code></p><p>Returns the number of columns in a returned row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)</code></p><p>Returns the attributes (data type, name, length) of each column; used together with taos_num_subfields to parse each returned row.</p></li></ul>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code></p><p>Returns the number of columns in a returned row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_SUB *tsub)</code></p><p>Returns the attributes (data type, name, length) of each column; used together with taos_num_fields to parse each returned row.</p></li></ul>
<p>Sample code: see the example programs in the installation package; a minimal consumption-loop sketch is also given below.</p>
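As a quick orientation, here is a minimal consumption-loop sketch built only from the calls listed above. It mirrors the legacy signatures documented on this page (the current taos.h may differ), the `tsub` handle is assumed to have been obtained from taos_subscribe with the parameters described earlier, and printing column names via the `name` member of TAOS_FIELD is an assumption about the struct layout.

```c
#include <stdio.h>
#include <taos.h>

/* Minimal sketch: print the column names once, then consume rows until an error occurs. */
void consume_loop(TAOS_SUB *tsub) {
  int         ncols  = taos_num_fields(tsub);    /* number of columns per row   */
  TAOS_FIELD *fields = taos_fetch_fields(tsub);  /* per-column metadata         */

  for (int i = 0; i < ncols; i++) {
    printf("%s\t", fields[i].name);              /* assumed TAOS_FIELD member   */
  }
  printf("\n");

  for (;;) {
    TAOS_ROW row = taos_consume(tsub);           /* blocks until a new record   */
    if (row == NULL) break;                      /* NULL signals a system error */
    printf("received one row (%d columns)\n", ncols);
  }
  taos_unsubscribe(tsub);                        /* release the subscription    */
}
```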
<a class='anchor' id='缓存-(Cache)'></a><h2>Cache</h2>
<p>TDengine adopts a time-driven, First-In-First-Out (FIFO) cache management strategy, also called write-driven cache management. It differs from the read-driven Least-Recently-Used (LRU) approach: the most recently written data is kept directly in the system cache, and when the cache reaches a threshold, the oldest data is flushed to disk in batches. Generally speaking, for IoT data, users care most about the data generated most recently, i.e. the current state; TDengine takes full advantage of this by keeping the most recently arrived (current-state) data in the cache.</p>


@ -64,9 +64,9 @@
<p>This API retrieves the latest message and is usually placed inside an infinite loop in the application. The parameter tsub is the return value of taos_subscribe. When a new record arrives, the API returns it as one row; when there is no new record, the API blocks. A NULL return value indicates a system error, and you should check whether the system is still running normally.</p></li>
<li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code></p>
<p>This API cancels a subscription; the parameter tsub is the return value of taos_subscribe. The application must call this API before exiting, otherwise resources leak.</p></li>
<li><p><code>int taos_num_subfields(TAOS_SUB *tsub)</code></p>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code></p>
<p>This API returns the number of columns in a returned row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_subfields(TAOS_RES *res)</code></p>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code></p>
<p>This API returns the attributes (data type, name, byte size) of each column; used together with taos_num_fields to parse a returned row.</p></li>
</ul>
<a class='anchor' id='Java-Connector'></a><h2>Java Connector</h2>


@ -72,9 +72,9 @@ The API is used to start a subscription session by given a handle. The parameter
The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter <em>tsub</em> is the handle returned by <em>taos_subscribe</em>. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives. If <em>NULL</em> pointer is returned, it means an error occurs.</p></li>
<li><p><code>void taos_unsubscribe(TAOS_SUB *tsub)</code>
Stop a subscription session by the handle returned by <em>taos_subscribe</em>.</p></li>
<li><p><code>int taos_num_subfields(TAOS_SUB *tsub)</code>
<li><p><code>int taos_num_fields(TAOS_SUB *tsub)</code>
The API used to get the number of fields in a row.</p></li>
<li><p><code>TAOS_FIELD *taos_fetch_subfields(TAOS_RES *res)</code>
<li><p><code>TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)</code>
The API used to get the description of each column.</p></li>
</ul>
<a class='anchor' id='Java-Connector'></a><h2>Java Connector</h2>


@ -87,6 +87,7 @@ The TDengine backend service is provided by taosd; its settings can be changed in the configuration file taos.cfg
- httpPort: port used by the RESTful service; all HTTP (TCP) query/write requests are sent to this port.
- dataDir: data file directory; all data files are written to this directory. Default: /var/lib/taos.
- logDir: log file directory; client and server run-time logs are written to this directory. Default: /var/log/taos.
- tempDir: temporary file directory; temporary files of the client and server (mainly intermediate results saved during queries) are written to this directory. Default: /tmp/ on Linux; on Windows, the directory pointed to by the environment variable tmp or temp.
- arbitrator: end point of the arbitrator in the system; empty by default.
- role: optional role of the dnode. 0: any (may serve as an mnode and may be assigned vnodes); 1: mgmt (may only serve as an mnode, no vnodes); 2: dnode (may not serve as an mnode, only vnodes).
- debugFlag: run-time log switch. 131: errors and warnings; 135: errors, warnings and debug; 143: errors, warnings, debug and trace. Default: 131 or 135 (different modules have different defaults).


@ -34,7 +34,8 @@ TDengine is a platform for efficiently storing, querying and analyzing large volumes of time-series data
- [Writing with SQL](https://www.taosdata.com/cn/documentation20/insert/#SQL写入): use the SQL insert command to write one or more records into one or more tables
- [Writing through Telegraf](https://www.taosdata.com/cn/documentation20/insert/#Telegraf直接写入): configure Telegraf to write collected data directly, without any code
- [Writing through Prometheus](https://www.taosdata.com/cn/documentation20/insert/#Prometheus直接写入): configure Prometheus to write data directly, without any code
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入): configure EMQ X to write MQTT data directly, without any code
- [EMQ X Broker](https://www.taosdata.com/cn/documentation20/insert/#EMQ-X-Broker直接写入): configure EMQ X to write MQTT data directly, without any code
- [HiveMQ Broker](https://www.taosdata.com/cn/documentation20/insert/#HiveMQ-Broker直接写入): use the HiveMQ Extension to write MQTT data directly, without any code
## [Efficient data querying](https://www.taosdata.com/cn/documentation20/queries)


@ -10,7 +10,7 @@ One of TDengine's modules is a time-series database, but beyond that, to reduce development
* __Hardware or cloud-service cost reduced to 1/5__: thanks to its superior performance, the computing resources needed are less than 1/5 of a generic big-data solution; with columnar storage and advanced compression algorithms, storage space is less than 1/10 of a generic database.
* __Full-stack time-series data processing engine__: database, message queue, cache and stream computing are integrated into one product, so applications no longer need to integrate Kafka/Redis/HBase/Spark/HDFS, which greatly reduces the complexity and cost of application development and maintenance.
* __Powerful analysis capabilities__: whether the data is ten years old or one second old, it can be queried simply by specifying a time range. Data can be aggregated over time or across devices, and ad-hoc queries can be issued at any time via Shell, Python, R or MATLAB.
* __Seamless integration with third-party tools__: integrates with Telegraf, Grafana, EMQ, Prometheus, MATLAB, R and more without a single line of code. OPC, Hadoop, Spark and others will be supported later, and BI tools will also connect seamlessly.
* __Seamless integration with third-party tools__: integrates with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, MATLAB, R and more without a single line of code. OPC, Hadoop, Spark and others will be supported later, and BI tools will also connect seamlessly.
* __Zero operations cost, zero learning cost__: installing a cluster is quick and simple, with no need to split databases or tables, and with real-time backup. The SQL-like language supports RESTful access and Python/Java/C/C++/C#/Go/Node.js, and is similar to MySQL, so there is essentially no learning cost.
Adopting TDengine can dramatically reduce the total cost of ownership of a typical IoT, connected-vehicle or industrial-IoT big-data platform. Note, however, that because it takes full advantage of the characteristics of IoT time-series data, TDengine is not suitable for general-purpose data from web crawlers, Weibo, WeChat, e-commerce, ERP, CRM and the like.


@ -44,6 +44,8 @@ CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```
Here d1001 is the table name and meters is the name of the super table, followed by the concrete tag value "Beijing.Chaoyang" for the tag Location and the concrete tag value 2 for the tag groupId. Although the tag values must be specified when the table is created, they can be modified afterwards. See TAOS SQL for the detailed rules.
**Note:** TDengine currently does not technically prevent using a super table of one database (dbA) as the template for creating a child table in another database (dbB). This usage will be forbidden later, and creating tables this way is not recommended.
TDengine recommends using the globally unique ID of a data collection point as the table name (for example, the device serial number). If a scenario has no unique ID, several IDs can be combined into one unique ID. It is not recommended to use a unique ID as a tag value.
**Automatic table creation**: in some special scenarios the application does not know, at write time, whether the table for a data collection point exists; in that case the auto-create-table syntax can be used in the write statement, which creates the table if it does not exist and does nothing if it already does. For example:


@ -90,7 +90,7 @@ TDengine's default timestamp precision is milliseconds, but it can be changed via the configuration parameter enableMic
```mysql
ALTER DATABASE db_name REPLICA 2;
```
The REPLICA parameter changes the number of replicas of the database; valid range [1, 3]. When used in a cluster, the number of replicas must be smaller than the number of dnodes.
The REPLICA parameter changes the number of replicas of the database; valid range [1, 3]. When used in a cluster, the number of replicas must be smaller than or equal to the number of dnodes.
```mysql
ALTER DATABASE db_name KEEP 365;
@ -844,7 +844,7 @@ TDengine supports aggregation queries over data; the supported aggregation and selection functions
- **PERCENTILE**
```mysql
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause];
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
Description: returns the value at percentile P of a column in the table.
Return type: double-precision floating point (Double).
@ -1016,9 +1016,9 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
```
## TAOS SQL limits
- maximum length of a database name: 33
- maximum length of a table name: 193; maximum length of one row of data: 16k characters
- maximum length of a column name: 65; at most 1024 columns; at least 2 columns, the first of which must be the timestamp
- maximum length of a database name: 32
- maximum length of a table name: 192; maximum length of one row of data: 16k characters
- maximum length of a column name: 64; at most 1024 columns; at least 2 columns, the first of which must be the timestamp
- at most 128 tags are allowed (0 is also allowed); the total tag length must not exceed 16k characters
- maximum length of a SQL statement: 65480 characters, configurable up to 1M via the system parameter maxSQLLength
- there is no system-imposed limit on the number of databases, super tables or tables; they are limited only by system resources


@ -35,7 +35,7 @@ Compared with general-purpose databases, TDengine has an extremely high compression ratio; in the vast majority of scenarios
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
```
示例1000万台智能电表每台电表每15分钟采集一次数据每次采集的数据128字节那么一年的原始数据量是10000000\*128\*24\*60/15*365 = 44851T。TDengine大概需要消耗44851/5=8970T, 8.9P空间。
示例1000万台智能电表每台电表每15分钟采集一次数据每次采集的数据128字节那么一年的原始数据量是10000000\*128\*24\*60/15*365 = 44.8512T。TDengine大概需要消耗44.851/5=8.97024T空间。
The parameter keep sets the maximum retention period of data on disk. To further reduce storage cost, TDengine also provides tiered storage: the coldest data can be placed on the cheapest storage media, with no change required on the application side, only a lower read speed.
@ -95,6 +95,7 @@ The TDengine backend service is provided by taosd; its settings can be changed in the configuration file taos.cfg
- logKeepDays: maximum retention period of log files. When greater than 0, log files are renamed to taosdlog.xxx, where xxx is the timestamp (in seconds) of the file's last modification. Default: 0 days.
- maxSQLLength: maximum length allowed for a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information; 0 means not allowed, 1 means allowed. Default: 1.
- stream: whether continuous queries (stream computing) are enabled; 0 means disabled, 1 means enabled. Default: 1.
**Note:** for ports, TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; be sure to open them in the firewall. With the default configuration, the 13 ports from 6030 to 6042 must be open, for both TCP and UDP.
@ -253,7 +254,7 @@ ALTER USER <user_name> PASS <'password'>;
Changes the user's password. To prevent it from being converted to lower case, the password must be enclosed in single quotes (ASCII half-width single quotes).
```
ALTER USER <user_name> PRIVILEDGE <super|write|read>;
ALTER USER <user_name> PRIVILEGE <super|write|read>;
```
Changes the user's privilege to super/write/read; single quotes are not needed.


@ -4,16 +4,99 @@
### Typical IoT scenarios
In typical IoT, connected-vehicle and operations-monitoring scenarios, there are often many different types of data collection devices, each collecting one or more physical quantities, and for each device type there are usually many individual devices distributed in different locations. A big-data processing system has to aggregate all of this collected data and then compute and analyze it. For devices of the same type, the collected data is highly regular. Taking smart meters as an example, assume each meter collects current, voltage and phase; the collected data then looks like the following table:
| Device ID | Time Stamp | current | voltage | phase | location | groupId |
| :-------: | :-----------: | :-----: | :-----: | :---: | :--------------: | :-----: |
| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 |
| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | Beijing.Chaoyang | 3 |
| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | Beijing.Haidian | 3 |
| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | Beijing.Haidian | 2 |
| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 |
| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | Beijing.Haidian | 2 |
| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | Beijing.Chaoyang | 3 |
| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 2 |
<figure><table>
<thead><tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Timestamp</th>
<th style="text-align:center;" colspan="3">Collected Metrics</th>
<th style="text-align:center;" colspan="2">Tags</th>
</tr>
<tr>
<th style="text-align:center;">Device ID</th>
<th style="text-align:center;">Time Stamp</th>
<th style="text-align:center;">current</th>
<th style="text-align:center;">voltage</th>
<th style="text-align:center;">phase</th>
<th style="text-align:center;">location</th>
<th style="text-align:center;">groupId</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548685000</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">219</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548684000</td>
<td style="text-align:center;">10.2</td>
<td style="text-align:center;">220</td>
<td style="text-align:center;">0.23</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1003</td>
<td style="text-align:center;">1538548686500</td>
<td style="text-align:center;">11.5</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.35</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548685500</td>
<td style="text-align:center;">13.4</td>
<td style="text-align:center;">223</td>
<td style="text-align:center;">0.29</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548695000</td>
<td style="text-align:center;">12.6</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.33</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1004</td>
<td style="text-align:center;">1538548696600</td>
<td style="text-align:center;">11.8</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.28</td>
<td style="text-align:center;">Beijing.Haidian</td>
<td style="text-align:center;">2</td>
</tr>
<tr>
<td style="text-align:center;">d1002</td>
<td style="text-align:center;">1538548696650</td>
<td style="text-align:center;">10.3</td>
<td style="text-align:center;">218</td>
<td style="text-align:center;">0.25</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">3</td>
</tr>
<tr>
<td style="text-align:center;">d1001</td>
<td style="text-align:center;">1538548696800</td>
<td style="text-align:center;">12.3</td>
<td style="text-align:center;">221</td>
<td style="text-align:center;">0.31</td>
<td style="text-align:center;">Beijing.Chaoyang</td>
<td style="text-align:center;">2</td>
</tr>
</tbody>
</table></figure>
<center>Table 1: Sample data from smart meters</center>
@ -221,14 +304,14 @@ TDengine adopts a time-driven cache management strategy, First-In-First-Out (FIFO)
Through its query functions, TDengine provides millisecond-level access to data. Keeping the most recently arrived data directly in the cache lets it respond much faster to queries and analyses over the latest record or batch of records, giving better overall query responsiveness. In this sense, **by setting appropriate configuration parameters, TDengine can be used as a data cache, with no need to deploy Redis or another additional caching system**, which effectively simplifies the system architecture and lowers operating costs. Note that after TDengine restarts, the cache is emptied: all previously cached data is written to disk in batches, and TDengine does not reload the previously cached data into the cache the way a dedicated key-value caching system does.
Each vnode has its own independent memory consisting of multiple fixed-size memory blocks, and different vnodes are completely isolated from each other. Writes are appended to memory sequentially, like a log, but each vnode maintains its own skip list for fast lookup. When more than one memory block is full, flushing to disk starts and subsequent writes go to a new memory block; this way a vnode keeps one memory block of the most recent data, serving the purposes of caching and fast lookup. The number of memory blocks per vnode is set by the configuration parameter blocks, and the block size by the parameter cache.
Each vnode has its own independent memory consisting of multiple fixed-size memory blocks, and different vnodes are completely isolated from each other. Writes are appended to memory sequentially, like a log, but each vnode maintains its own skip list for fast lookup. When more than one third of the memory blocks are full, flushing to disk starts and subsequent writes go to a new memory block; this way one third of a vnode's memory blocks hold the most recent data, serving the purposes of caching and fast lookup. The number of memory blocks per vnode is set by the configuration parameter blocks, and the block size by the parameter cache.
### Persistent storage
TDengine flushes cached data to disk for persistence in a data-driven way: when the cached data in a vnode reaches a certain amount, TDengine starts a flush thread to write the cached data to persistent storage so that subsequent writes are not blocked. When flushing, TDengine opens a new database log file and, after the flush succeeds, deletes the old log file, so that the log cannot grow without bound.
To take full advantage of the characteristics of time-series data, TDengine splits the persisted data of a vnode into multiple files, each holding a fixed number of days of data determined by the configuration parameter days. With this split, given the start and end dates of a query, the data files that need to be opened can be located immediately, without any index, which greatly speeds up reads.
Collected data generally has a retention period, determined by the configuration parameter keep; data files older than this number of days are automatically deleted by the system, freeing storage space.
Collected data generally has a retention period, determined by the configuration parameter keep; data files older than this number of days are automatically deleted by the system, freeing storage space.
Given the parameters days and keep, the total number of data files in a vnode is keep/days. The total number of files should be neither too large nor too small; between 10 and 100 is appropriate, and a reasonable value of days can be chosen on this basis. In the current version the parameter keep can be modified, but days cannot be changed once set.
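As a small worked example (the values here are chosen only for illustration, not taken from the documentation): with keep = 365 and days = 10, a vnode holds $keep/days = 365/10 = 36.5 \approx 37$ data files, which falls inside the recommended range of 10 to 100.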


@ -37,7 +37,7 @@ fqdn h1.taosdata.com
// port of this data node; default is 6030
serverPort 6030
// required when the number of server-side nodes is even; see the section "Use of Arbitrator"
// for usage scenarios, see the section "Use of Arbitrator"
arbitrator ha.taosdata.com:6042
```
@ -213,6 +213,6 @@ SHOW MNODES;
## Use of Arbitrator
If the number of replicas is even, then when half or more than half of the vnodes in a vnode group are not working, no master can be elected from the group; likewise, when half or more than half of the mnodes are not working, no mnode master can be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator simulates a working vnode or mnode, but it only maintains the network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, counting the arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with two replicas, if node A is offline but node B is running and can connect to the arbitrator, node B works normally.
If the number of replicas is even, then when half of the vnodes in a vnode group are not working, no master can be elected from the group; likewise, when half of the mnodes are not working, no mnode master can be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator simulates a working vnode or mnode, but it only maintains the network connection and does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, counting the arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with two replicas, if node A is offline but node B is running and can connect to the arbitrator, node B works normally.
The TDengine installation package ships with an executable named tarbitrator; run it on any Linux server. It requires almost no system resources, only a network connection. Its command-line parameter `-p` specifies the port it serves on (default 6042). When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the arbitrator's End Point; if this parameter is configured and the number of replicas is even, the system automatically connects to the configured arbitrator.
TDengine provides an executable named tarbitrator; run it on any Linux server. Go to [the download page](https://www.taosdata.com/cn/all-downloads/), pick a suitable version from the "TDengine Arbitrator Linux" section, then download and install it. The program requires almost no system resources, only a network connection. Its command-line parameter `-p` specifies the port it serves on (default 6042). When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured arbitrator; if the number of replicas is odd, the system does not establish the connection even if an arbitrator is configured.


@ -142,7 +142,7 @@ The C/C++ API is similar to MySQL's C API; to use it, an application needs to include the TDengine
Returns the reason for the most recent failed API call; the return value is an error code.
**Note**: for a single database connection, only one thread may use that connection to call the API at any given time; otherwise the behavior is undefined and the client may crash. A client application can perform multi-threaded writes or queries by establishing multiple connections.
**Note**: for each database, TDengine 2.0 and later recommends establishing only one connection and sharing that connection (the TAOS* structure) across the application's threads. Queries, writes and other operations issued on the TAOS structure are thread-safe. The C connector can dynamically create new database-oriented connections on demand (a process invisible to the user), and it is recommended to call taos_close to close the connection only when the program finally exits. A minimal sketch of this recommendation follows.
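The sketch below illustrates the single shared connection pattern described in the note above; it is only an outline, and the host, credentials and database name are placeholders, not values taken from this documentation.

```c
#include <taos.h>

/* One connection per database, created once and shared by the whole process;
 * per the note above, queries and writes on a TAOS* are thread-safe. */
static TAOS *g_conn = NULL;

int app_init(void) {
  taos_init();
  /* placeholder host, credentials and database name */
  g_conn = taos_connect("127.0.0.1", "root", "taosdata", "demo", 6030);
  return (g_conn != NULL) ? 0 : -1;
}

void run_sql(const char *sql) {
  TAOS_RES *res = taos_query(g_conn, sql);  /* may be called from any thread */
  taos_free_result(res);
}

void app_exit(void) {
  taos_close(g_conn);   /* close only when the program finally exits */
  taos_cleanup();
}
```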
### Asynchronous query API
@ -616,6 +616,43 @@ When the HTTP request URL uses `sqlutc`, the timestamps in the result set are returned in UTC
- httpEnableCompress: whether compression is supported; disabled by default; currently TDengine only supports the gzip compression format
- httpDebugFlag: log switch; 131: errors and alarms only; 135: debug information; 143: very detailed debug information; default 131
## CSharp Connector
On Windows, C# applications can use TDengine's native C interface to perform all database operations; a later release will provide a driver for the ORM (Dapper) framework.
#### Install the TDengine client
The C# connector uses `libtaos.so` and `taos.h`, so before using the C# connector you must install the TDengine Windows client in the Windows environment where the program runs, in order to obtain the required driver files.
After installation, the folder `C:/TDengine/examples/C#` contains two files:
- TDengineDriver.cs: native C methods that call the taos.dll file
- TDengineTest.cs: a reference example program
The folder `C:\Windows\System32` contains the `taos.dll` file.
#### Usage
- Add the C# interface file TDengineDriver.cs to the .NET project of your application
- Refer to TDengineTest.cs for how to define the database connection parameters and the methods for inserting and querying data
- Because the C# interface depends on the `taos.dll` file, you can add `taos.dll` to the .NET solution
#### Notes
- `taos.dll` is compiled for the x64 platform, so when generating the .exe file, set the "Platform" of both the solution and the project to "x64".
- This .NET interface has been verified with Visual Studio 2013/2015/2017; other VS versions have not yet been verified.
#### Third-party driver
Maikebing.Data.Taos is an ADO.NET provider for TDengine that supports Linux and Windows. It is contributed by the community contributor `麦壳饼@maikebing`; for details see:
```
// download the provider
https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos
// usage notes
https://www.taosdata.com/blog/2020/11/02/1901.html
```
## Go Connector


@ -36,16 +36,20 @@
5. Ping the server's FQDN; if there is no response, check your network, your DNS settings, or the hosts file of the machine the client runs on
6. Check the firewall settings and confirm that TCP/UDP ports 6030-6042 are open
6. Check the firewall settings (Ubuntu: use ufw status; CentOS: use firewall-cmd --list-port) and confirm that TCP/UDP ports 6030-6042 are open
7. For JDBC (and similar ODBC, Python and Go connections) on Linux, make sure *libtaos.so* is in the directory */usr/local/lib/taos* and that */usr/local/lib/taos* is in the system library search path *LD_LIBRARY_PATH*
7. For JDBC (and similar ODBC, Python and Go connections) on Linux, make sure *libtaos.so* is in the directory */usr/local/taos/driver* and that */usr/local/taos/driver* is in the system library search path *LD_LIBRARY_PATH*
8. For JDBC, ODBC, Python and Go connections on Windows, make sure *driver/c/taos.dll* is in the system search path (it is recommended to put *taos.dll* in the directory *C:\Windows\System32*)
8. For JDBC, ODBC, Python and Go connections on Windows, make sure *C:\TDengine\driver\taos.dll* is in the system library search path (it is recommended to put *taos.dll* in the directory *C:\Windows\System32*)
9. If the connection failure still cannot be resolved, use the command-line tool nc to check whether the TCP and UDP connections on the given port are open
Check whether a UDP port connection works: `nc -vuz {hostIP} {port} `
Check whether the server-side TCP port works: `nc -l {port}`
Check whether the client-side TCP port works: `nc {hostIP} {port}`
9. If the connection failure still cannot be resolved:
* On Linux, use the command-line tool nc to check whether the TCP and UDP connections on the given port are open
Check whether a UDP port connection works: `nc -vuz {hostIP} {port} `
Check whether the server-side TCP port works: `nc -l {port}`
Check whether the client-side TCP port works: `nc {hostIP} {port}`
* On Windows, use the PowerShell command Test-NetConnection -ComputerName {fqdn} -Port {port} to check whether the server-side port is reachable
10. You can also use the network connectivity test built into the taos program to check whether the given port between server and client is open, for both TCP and UDP: [TDengine built-in network test tool guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
@ -101,7 +105,20 @@ Connection = DriverManager.getConnection(url, properties);
<version>2.0.4</version>
</dependency>
```
## 14. How do I report an issue?
## 14. taos connect failed, reason: invalid timestamp
A common cause is that the server and client clocks are not synchronized; this can be fixed by synchronizing with a time server (on Linux with the ntpdate command, on Windows by selecting automatic synchronization in the system time settings).
## 15. Table names are not displayed in full
Because the display width of the taos shell in the terminal is limited, longer table names may be shown truncated, and operations that use the truncated name fail with a "Table does not exist" error. This can be solved by changing the setting maxBinaryDisplayWidth in the taos.cfg file, by entering the command set max_binary_display_width 100 directly, or by appending \G to the statement to change how the result is displayed.
## 16. How do I report an issue?
If the information in this FAQ does not help and you need technical support from the TDengine team, please package the contents of the following two directories:
1. /var/log/taos
2. /etc/taos


@ -1,6 +1,6 @@
# Efficient data writing
TDengine supports writing data through multiple interfaces, including SQL, Prometheus, Telegraf, EMQ MQTT Broker and CSV files, with Kafka, OPC and other interfaces to follow. Data can be inserted one record at a time or in batches, for a single data collection point or for multiple collection points at once. Multi-threaded insertion, out-of-order insertion and historical data insertion are all supported.
TDengine supports writing data through multiple interfaces, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker and CSV files, with Kafka, OPC and other interfaces to follow. Data can be inserted one record at a time or in batches, for a single data collection point or for multiple collection points at once. Multi-threaded insertion, out-of-order insertion and historical data insertion are all supported.
## Writing with SQL
@ -218,7 +218,15 @@ use telegraf;
select * from cpu;
```
## Writing directly through EMQ X Broker
MQTT is a popular IoT data transfer protocol, and [EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; without writing any code, you only need to configure a "rule" in the EMQ Dashboard to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and its enterprise edition also provides a native TDengine driver for direct saving. See the [EMQ official documentation](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for details.
MQTT is a popular IoT data transfer protocol, and TDengine can easily ingest data received by an MQTT broker and write it into TDengine.
## Writing directly through the EMQ Broker
[EMQ](https://github.com/emqx/emqx) is an open-source MQTT broker; without writing any code, you only need to configure a "rule" in the EMQ Dashboard to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and its enterprise edition also provides a native TDengine driver for direct saving. See the [EMQ official documentation](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for details.
## Writing directly through the HiveMQ Broker
[HiveMQ](https://www.hivemq.com/) is an MQTT broker with free personal and enterprise editions, mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transport, with scalability, manageability and security features. HiveMQ provides an open-source extension development kit, and data can be saved to TDengine through the HiveMQ extension - TDengine. See the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for details.


@ -61,7 +61,7 @@ The vnode and its submodules are invoked through direct API calls rather than through message queues
The mnode is the brain of the whole system; it is responsible for resource scheduling across the system and for managing and storing the metadata.
A running system has only one mnode, but the mnode has multiple replicas, the number of which is controlled by the system configuration parameter numOfMpeers. The replicas are placed on different dnodes to keep the system highly available. Replication between the replicas is synchronous rather than asynchronous, to guarantee data consistency and prevent data loss. The replicas automatically elect a master, and the others become slaves; all data-update operations can only be performed on the master, while queries can also run on slaves. In the implementation, the sync module is shared with the vnodes, but the mnode is assigned the special vgroup ID 1 and its quorum is greater than 1. The whole cluster consists of multiple dnodes, so the number of running mnode replicas cannot exceed the number of dnodes, and it will not exceed the configured number of replicas. If an mnode replica is down for some time, then as long as more than half of the mnode replicas are still running, the running mnodes automatically start another mnode on some other dnode, based on the resource situation of the whole system, to maintain the number of running replicas.
A running system has only one mnode, but the mnode has multiple replicas, the number of which is controlled by the system configuration parameter numOfMnodes. The replicas are placed on different dnodes to keep the system highly available. Replication between the replicas is synchronous rather than asynchronous, to guarantee data consistency and prevent data loss. The replicas automatically elect a master, and the others become slaves; all data-update operations can only be performed on the master, while queries can also run on slaves. In the implementation, the sync module is shared with the vnodes, but the mnode is assigned the special vgroup ID 1 and its quorum is greater than 1. The whole cluster consists of multiple dnodes, so the number of running mnode replicas cannot exceed the number of dnodes, and it will not exceed the configured number of replicas. If an mnode replica is down for some time, then as long as more than half of the mnode replicas are still running, the running mnodes automatically start another mnode on some other dnode, based on the resource situation of the whole system, to maintain the number of running replicas.
Through information exchange, each dnode keeps the End Point list of all mnode replicas and periodically (at an interval controlled by the configuration parameter statusInterval) sends a status message to the master among them. The message body contains the dnode's CPU, memory, remaining storage space and number of vnodes, as well as the status of each vnode (storage space, raw data size, number of records, role, and so on). In this way the mnode knows the resource situation of the whole system: when a user creates a new table it can decide on which dnode to create it, and when dnodes are added or removed, or a dnode is detected to be overloaded or offline for too long, it can decide which vnodes should be moved to rebalance the load.


@ -20,6 +20,9 @@
# data file's directory
# dataDir /var/lib/taos
# temporary file's directory
# tempDir /tmp/
# the arbitrator's fully qualified domain name (FQDN) for TDengine system, for cluster only
# arbitrator arbitrator_hostname:6042
@ -233,7 +236,7 @@
# httpDebugFlag 131
# debug flag for monitor
# monitorDebugFlag 131
# monDebugFlag 131
# debug flag for query
# qDebugflag 131
@ -248,7 +251,7 @@
# cqDebugFlag 131
# enable/disable recording the SQL in taos client
# tscEnableRecordSql 0
# enableRecordSql 0
# generate core file when service crash
# enableCoreFile 1
@ -256,3 +259,11 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
# enable/disable stream (continuous query)
# stream 1
# only 50% CPU resources will be used in query processing
# halfCoresForQuery 0


@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
@ -58,7 +59,7 @@ cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||:
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/
chmod 755 ${pkg_dir}/DEBIAN/*


@ -156,9 +156,15 @@ build_time=$(date +"%F %R")
# get commint id from git
gitinfo=$(git rev-parse --verify HEAD)
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
if [[ "$verMode" == "cluster" ]]; then
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
gitinfoOfInternal=$(git rev-parse --verify HEAD)
else
gitinfoOfInternal=NULL
fi
cd ${curr_dir}
# 2. cmake executable file
@ -193,23 +199,35 @@ cd ${curr_dir}
# 3. Call the corresponding script for packaging
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
ret='0'
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========dpkg command not exist, so not release deb package!!!"
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
ret='0'
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
if [ "$ret" -eq 0 ]; then
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
else
echo "==========rpmbuild command not exist, so not release rpm package!!!"
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
fi
echo "====do tar.gz package for all systems===="


@ -2,7 +2,7 @@
%define cfg_install_dir /etc/taos
%define __strip /bin/true
Name: TDengine
Name: tdengine
Version: %{_version}
Release: 3%{?dist}
Summary: tdengine from taosdata
@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
@ -65,7 +66,7 @@ cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/conn
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
#Scripts executed before installation
@ -134,6 +135,7 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :

packaging/tools/get_client.sh (new executable file, 21 lines)

@ -0,0 +1,21 @@
#!/bin/bash
#
log_dir=$1
result_file=$2
if [ ! -n "$1" ];then
echo "Please input the directory of taosdlog."
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
exit 1
else
log_dir=$1
fi
if [ ! -n "$2" ];then
result_file=clientInfo.txt
else
result_file=$2
fi
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}


@ -172,6 +172,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -182,6 +183,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
@ -312,7 +314,7 @@ function install_data() {
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {


@ -84,8 +84,9 @@ function install_main_path() {
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -94,8 +95,9 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
@ -163,7 +165,7 @@ function install_log() {
}
function install_connector() {
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
${csudo} cp -rf ${script_dir}/connector/ ${install_main_dir}/
}
function install_examples() {


@ -84,8 +84,9 @@ function install_main_path() {
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/power || :
if [ "$osType" == "Darwin" ]; then
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -94,8 +95,9 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
if [ "$osType" == "Darwin" ]; then
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || :
fi
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :


@ -172,6 +172,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -182,6 +183,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
[ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || :
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :


@ -278,11 +278,11 @@ function install_service_on_sysvinit() {
# Install taosd service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../deb/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"


@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
@ -110,7 +110,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/


@ -77,7 +77,9 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
else
cp ${bin_files} ${install_dir}/bin
@ -123,7 +125,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
@ -135,7 +137,7 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/


@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
@ -124,7 +124,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector


@ -77,8 +77,10 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin
fi
chmod a+x ${install_dir}/bin/* || :
@ -146,7 +148,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/R ${install_dir}/examples
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
cp -r ${examples_dir}/go ${install_dir}/examples
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/taosdemo.go
fi
# Copy driver
mkdir -p ${install_dir}/driver
@ -156,7 +158,7 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector


@ -10,6 +10,7 @@ data_dir="/var/lib/taos"
log_dir="/var/log/taos"
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
install_main_dir="/usr/local/taos"
# static directory
cfg_dir="/usr/local/taos/cfg"
@ -80,8 +81,10 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
}
function install_bin() {
@ -89,6 +92,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -98,6 +102,7 @@ function install_bin() {
[ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
[ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
[ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
}
@ -120,8 +125,11 @@ function install_config() {
echo -e -n "${GREEN}Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join${NC}"
echo
echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
read firstEp
while true; do
#read firstEp
if exec < /dev/tty; then
read firstEp;
fi
while true; do
if [ ! -z "$firstEp" ]; then
# check the format of the firstEp
#if [[ $firstEp == $FQDN_PATTERN ]]; then
@ -135,6 +143,29 @@ function install_config() {
break
fi
done
# user email
#EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$'
#EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$'
#EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$"
echo
echo -e -n "${GREEN}Enter your email address for priority support or enter empty to skip${NC}: "
read emailAddr
while true; do
if [ ! -z "$emailAddr" ]; then
# check the format of the emailAddr
#if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then
# Write the email address to temp file
email_file="${install_main_dir}/email"
${csudo} bash -c "echo $emailAddr > ${email_file}"
break
#else
# read -p "Please enter the correct email address: " emailAddr
#fi
else
break
fi
done
}
function clean_service_on_sysvinit() {


@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :


@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}


@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
}


@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :


@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.5.1'
version: '2.0.8.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.5.1
- usr/lib/libtaos.so.2.0.8.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so


@ -20,4 +20,6 @@ ADD_SUBDIRECTORY(tsdb)
ADD_SUBDIRECTORY(wal)
ADD_SUBDIRECTORY(cq)
ADD_SUBDIRECTORY(dnode)
#ADD_SUBDIRECTORY(connector/odbc)
ADD_SUBDIRECTORY(connector/jdbc)

src/balance/inc/bnInt.h (new file, 57 lines)

@ -0,0 +1,57 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_BALANCE_INT_H
#define TDENGINE_BALANCE_INT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "mnodeInt.h"
#include "mnodeDef.h"
#include "mnodeDnode.h"
typedef struct {
int32_t size;
int32_t maxSize;
SDnodeObj **list;
} SBnDnodes;
typedef struct {
void * timer;
bool stop;
pthread_mutex_t mutex;
pthread_cond_t cond;
pthread_t thread;
} SBnThread;
typedef struct {
pthread_mutex_t mutex;
} SBnMgmt;
int32_t bnInit();
void bnCleanUp();
bool bnStart();
void bnCheckStatus();
void bnCheckModules();
extern SBnDnodes tsBnDnodes;
extern void *tsMnodeTmr;
#ifdef __cplusplus
}
#endif
#endif

src/balance/inc/bnScore.h (new file, 34 lines)

@ -0,0 +1,34 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_BALANCE_SCORE_H
#define TDENGINE_BALANCE_SCORE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "bnInt.h"
void bnInitDnodes();
void bnCleanupDnodes();
void bnAccquireDnodes();
void bnReleaseDnodes();
float bnTryCalcDnodeScore(SDnodeObj *pDnode, int32_t extraVnode);
#ifdef __cplusplus
}
#endif
#endif


@ -0,0 +1,33 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_BALANCE_THREAD_H
#define TDENGINE_BALANCE_THREAD_H
#ifdef __cplusplus
extern "C" {
#endif
#include "bnInt.h"
int32_t bnInitThread();
void bnCleanupThread();
void bnNotify();
void bnStartTimer(int64_t mseconds);
#ifdef __cplusplus
}
#endif
#endif

(File diff suppressed because it is too large.)

src/balance/src/bnMain.c (new file, 683 lines)

@ -0,0 +1,683 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tsync.h"
#include "tglobal.h"
#include "dnode.h"
#include "bnInt.h"
#include "bnScore.h"
#include "bnThread.h"
#include "mnodeDb.h"
#include "mnodeMnode.h"
#include "mnodeSdb.h"
#include "mnodeShow.h"
#include "mnodeUser.h"
#include "mnodeVgroup.h"
static SBnMgmt tsBnMgmt;
static void bnMonitorDnodeModule();
static void bnLock() {
pthread_mutex_lock(&tsBnMgmt.mutex);
}
static void bnUnLock() {
pthread_mutex_unlock(&tsBnMgmt.mutex);
}
static bool bnCheckFree(SDnodeObj *pDnode) {
if (pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) {
mError("dnode:%d, status:%s not available", pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status));
return false;
}
if (pDnode->openVnodes >= TSDB_MAX_VNODES) {
mError("dnode:%d, openVnodes:%d maxVnodes:%d not available", pDnode->dnodeId, pDnode->openVnodes, TSDB_MAX_VNODES);
return false;
}
if (pDnode->diskAvailable <= tsMinimalDataDirGB) {
mError("dnode:%d, disk space:%fGB, not available", pDnode->dnodeId, pDnode->diskAvailable);
return false;
}
if (pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_MNODE) {
mDebug("dnode:%d, alternative role is master, can't alloc vnodes in this dnode", pDnode->dnodeId);
return false;
}
return true;
}
static void bnDiscardVnode(SVgObj *pVgroup, SVnodeGid *pVnodeGid) {
mDebug("vgId:%d, dnode:%d is dropping", pVgroup->vgId, pVnodeGid->dnodeId);
SDnodeObj *pDnode = mnodeGetDnode(pVnodeGid->dnodeId);
if (pDnode != NULL) {
atomic_sub_fetch_32(&pDnode->openVnodes, 1);
mnodeDecDnodeRef(pDnode);
}
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
memset(vnodeGid, 0, sizeof(vnodeGid));
int32_t numOfVnodes = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pTmpVodeGid = pVgroup->vnodeGid + i;
if (pTmpVodeGid == pVnodeGid) {
continue;
}
vnodeGid[numOfVnodes] = *pTmpVodeGid;
++numOfVnodes;
}
memcpy(pVgroup->vnodeGid, vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid));
pVgroup->numOfVnodes = numOfVnodes;
mnodeUpdateVgroup(pVgroup);
}
static void bnSwapVnodeGid(SVnodeGid *pVnodeGid1, SVnodeGid *pVnodeGid2) {
SVnodeGid tmp = *pVnodeGid1;
*pVnodeGid1 = *pVnodeGid2;
*pVnodeGid2 = tmp;
}
int32_t bnAllocVnodes(SVgObj *pVgroup) {
static int32_t randIndex = 0;
int32_t dnode = 0;
int32_t vnodes = 0;
bnLock();
bnAccquireDnodes();
mDebug("db:%s, try alloc %d vnodes to vgroup, dnodes total:%d, avail:%d", pVgroup->dbName, pVgroup->numOfVnodes,
mnodeGetDnodesNum(), tsBnDnodes.size);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
for (; dnode < tsBnDnodes.size; ++dnode) {
SDnodeObj *pDnode = tsBnDnodes.list[dnode];
if (bnCheckFree(pDnode)) {
SVnodeGid *pVnodeGid = pVgroup->vnodeGid + i;
pVnodeGid->dnodeId = pDnode->dnodeId;
pVnodeGid->pDnode = pDnode;
dnode++;
vnodes++;
mDebug("dnode:%d, is selected, vnodeIndex:%d", pDnode->dnodeId, i);
break;
} else {
mDebug("dnode:%d, is not selected, status:%s vnodes:%d disk:%fGB role:%d", pDnode->dnodeId,
mnodeGetDnodeStatusStr(pDnode->status), pDnode->openVnodes, pDnode->diskAvailable,
pDnode->alternativeRole);
}
}
}
if (vnodes != pVgroup->numOfVnodes) {
bnReleaseDnodes();
bnUnLock();
mDebug("db:%s, need vnodes:%d, but alloc:%d", pVgroup->dbName, pVgroup->numOfVnodes, vnodes);
void * pIter = NULL;
SDnodeObj *pDnode = NULL;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
mDebug("dnode:%d, status:%s vnodes:%d disk:%fGB role:%d", pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status),
pDnode->openVnodes, pDnode->diskAvailable, pDnode->alternativeRole);
mnodeDecDnodeRef(pDnode);
}
if (mnodeGetOnlineDnodesNum() == 0) {
return TSDB_CODE_MND_NOT_READY;
} else {
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
}
/*
* make the choice more random.
* replica 1: no choice
* replica 2: there are 2 combinations
* replica 3 or larger: there are 6 combinations
* (a standalone sketch of the swap sequences follows this function)
*/
if (pVgroup->numOfVnodes == 1) {
} else if (pVgroup->numOfVnodes == 2) {
if (randIndex++ % 2 == 0) {
bnSwapVnodeGid(pVgroup->vnodeGid, pVgroup->vnodeGid + 1);
}
} else {
int32_t randVal = randIndex++ % 6;
if (randVal == 1) { // 1, 0, 2
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 1);
} else if (randVal == 2) { // 1, 2, 0
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 1);
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
} else if (randVal == 3) { // 2, 1, 0
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 2);
} else if (randVal == 4) { // 2, 0, 1
bnSwapVnodeGid(pVgroup->vnodeGid + 0, pVgroup->vnodeGid + 2);
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
} else if (randVal == 5) { // 0, 2, 1
bnSwapVnodeGid(pVgroup->vnodeGid + 1, pVgroup->vnodeGid + 2);
} // randVal == 0: keep the order 0, 1, 2
}
bnReleaseDnodes();
bnUnLock();
return TSDB_CODE_SUCCESS;
}
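A minimal standalone sketch of how the randVal branches above permute a three-replica order; swapInt and the gid array are illustrative stand-ins for bnSwapVnodeGid and pVgroup->vnodeGid, not part of the original code:

#include <stdio.h>

static void swapInt(int *a, int *b) { int t = *a; *a = *b; *b = t; }

int main(void) {
  for (int randVal = 0; randVal < 6; ++randVal) {
    int gid[3] = {0, 1, 2};              // stands in for pVgroup->vnodeGid[0..2]
    if (randVal == 1) {                  // 1, 0, 2
      swapInt(&gid[0], &gid[1]);
    } else if (randVal == 2) {           // 1, 2, 0
      swapInt(&gid[0], &gid[1]);
      swapInt(&gid[1], &gid[2]);
    } else if (randVal == 3) {           // 2, 1, 0
      swapInt(&gid[0], &gid[2]);
    } else if (randVal == 4) {           // 2, 0, 1
      swapInt(&gid[0], &gid[2]);
      swapInt(&gid[1], &gid[2]);
    } else if (randVal == 5) {           // 0, 2, 1
      swapInt(&gid[1], &gid[2]);
    }                                    // randVal == 0 keeps 0, 1, 2
    printf("randVal %d -> %d, %d, %d\n", randVal, gid[0], gid[1], gid[2]);
  }
  return 0;
}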
static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
if (pVgroup->lbTime + 5 * tsStatusInterval > tsAccessSquence) {
return false;
}
bool isReady = false;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
if (pVnode == pRmVnode) continue;
mTrace("vgId:%d, check vgroup status, dnode:%d status:%d, vnode role:%s", pVgroup->vgId, pVnode->pDnode->dnodeId,
pVnode->pDnode->status, syncRole[pVnode->role]);
if (pVnode->pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
if (pVnode->pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
if (pVnode->role == TAOS_SYNC_ROLE_SLAVE || pVnode->role == TAOS_SYNC_ROLE_MASTER) {
isReady = true;
}
}
return isReady;
}
/**
* desc: remove one vnode from vgroup
* all vnodes in the vgroup should be in the ready state, except the one being balanced
**/
static int32_t bnRemoveVnode(SVgObj *pVgroup) {
if (pVgroup->numOfVnodes <= 1) return -1;
SVnodeGid *pRmVnode = NULL;
SVnodeGid *pSelVnode = NULL;
int32_t maxScore = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = &(pVgroup->vnodeGid[i]);
SDnodeObj *pDnode = mnodeGetDnode(pVnode->dnodeId);
if (pDnode == NULL) {
mError("vgId:%d, dnode:%d not exist, remove it", pVgroup->vgId, pVnode->dnodeId);
pRmVnode = pVnode;
break;
}
if (pDnode->status == TAOS_DN_STATUS_DROPPING) {
mDebug("vgId:%d, dnode:%d in dropping state", pVgroup->vgId, pVnode->dnodeId);
pRmVnode = pVnode;
} else if (pVnode->dnodeId == pVgroup->lbDnodeId) {
mDebug("vgId:%d, dnode:%d in updating state", pVgroup->vgId, pVnode->dnodeId);
pRmVnode = pVnode;
} else {
if (pSelVnode == NULL) {
pSelVnode = pVnode;
maxScore = pDnode->score;
} else {
if (maxScore < pDnode->score) {
pSelVnode = pVnode;
maxScore = pDnode->score;
}
}
}
mnodeDecDnodeRef(pDnode);
}
if (pRmVnode != NULL) {
pSelVnode = pRmVnode;
}
if (!bnCheckVgroupReady(pVgroup, pSelVnode)) {
mDebug("vgId:%d, is not ready", pVgroup->vgId);
return -1;
} else {
mDebug("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId);
bnDiscardVnode(pVgroup, pSelVnode);
return TSDB_CODE_SUCCESS;
}
}
static bool bnCheckDnodeInVgroup(SDnodeObj *pDnode, SVgObj *pVgroup) {
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pGid = &pVgroup->vnodeGid[i];
if (pGid->dnodeId == 0) break;
if (pGid->dnodeId == pDnode->dnodeId) {
return true;
}
}
return false;
}
/**
* desc: add vnode to vgroup, find a new one if dest dnode is null
**/
static int32_t bnAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDestDnode) {
if (pDestDnode == NULL) {
for (int32_t i = 0; i < tsBnDnodes.size; ++i) {
SDnodeObj *pDnode = tsBnDnodes.list[i];
if (pDnode == pSrcDnode) continue;
if (bnCheckDnodeInVgroup(pDnode, pVgroup)) continue;
if (!bnCheckFree(pDnode)) continue;
pDestDnode = pDnode;
mDebug("vgId:%d, add vnode to dnode:%d", pVgroup->vgId, pDnode->dnodeId);
break;
}
}
if (pDestDnode == NULL) {
return TSDB_CODE_MND_DNODE_NOT_EXIST;
}
SVnodeGid *pVnodeGid = pVgroup->vnodeGid + pVgroup->numOfVnodes;
pVnodeGid->dnodeId = pDestDnode->dnodeId;
pVnodeGid->pDnode = pDestDnode;
pVgroup->numOfVnodes++;
if (pSrcDnode != NULL) {
pVgroup->lbDnodeId = pSrcDnode->dnodeId;
}
atomic_add_fetch_32(&pDestDnode->openVnodes, 1);
mnodeUpdateVgroup(pVgroup);
return TSDB_CODE_SUCCESS;
}
static bool bnMonitorBalance() {
if (tsBnDnodes.size < 2) return false;
for (int32_t src = tsBnDnodes.size - 1; src >= 0; --src) {
SDnodeObj *pDnode = tsBnDnodes.list[src];
mDebug("%d-dnode:%d, state:%s, score:%.1f, numOfCores:%d, openVnodes:%d", tsBnDnodes.size - src - 1,
pDnode->dnodeId, mnodeGetDnodeStatusStr(pDnode->status), pDnode->score, pDnode->numOfCores,
pDnode->openVnodes);
}
float scoresDiff = tsBnDnodes.list[tsBnDnodes.size - 1]->score - tsBnDnodes.list[0]->score;
if (scoresDiff < 0.01) {
mDebug("all dnodes:%d is already balanced, scoresDiff:%f", tsBnDnodes.size, scoresDiff);
return false;
}
for (int32_t src = tsBnDnodes.size - 1; src > 0; --src) {
SDnodeObj *pSrcDnode = tsBnDnodes.list[src];
float srcScore = bnTryCalcDnodeScore(pSrcDnode, -1);
if (tsEnableBalance == 0 && pSrcDnode->status != TAOS_DN_STATUS_DROPPING) {
continue;
}
void *pIter = NULL;
while (1) {
SVgObj *pVgroup;
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (bnCheckDnodeInVgroup(pSrcDnode, pVgroup)) {
for (int32_t dest = 0; dest < src; dest++) {
SDnodeObj *pDestDnode = tsBnDnodes.list[dest];
if (bnCheckDnodeInVgroup(pDestDnode, pVgroup)) continue;
float destScore = bnTryCalcDnodeScore(pDestDnode, 1);
if (srcScore + 0.0001 < destScore) continue;
if (!bnCheckFree(pDestDnode)) continue;
mDebug("vgId:%d, balance from dnode:%d to dnode:%d, srcScore:%.1f:%.1f, destScore:%.1f:%.1f",
pVgroup->vgId, pSrcDnode->dnodeId, pDestDnode->dnodeId, pSrcDnode->score,
srcScore, pDestDnode->score, destScore);
bnAddVnode(pVgroup, pSrcDnode, pDestDnode);
mnodeDecVgroupRef(pVgroup);
mnodeCancelGetNextVgroup(pIter);
return true;
}
}
mnodeDecVgroupRef(pVgroup);
}
}
return false;
}
// if mgmt changed to master
// 1. reset balanceAccessSquence to zero
// 2. reset state of dnodes to offline
// 3. reset lastAccess of dnodes to zero
void bnReset() {
void * pIter = NULL;
SDnodeObj *pDnode = NULL;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
// while master change, should reset dnode to offline
mInfo("dnode:%d set access:%d to 0", pDnode->dnodeId, pDnode->lastAccess);
pDnode->lastAccess = 0;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
mnodeDecDnodeRef(pDnode);
}
tsAccessSquence = 0;
}
static int32_t bnMonitorVgroups() {
void * pIter = NULL;
SVgObj *pVgroup = NULL;
bool hasUpdatingVgroup = false;
while (1) {
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
int32_t dbReplica = pVgroup->pDb->cfg.replications;
int32_t vgReplica = pVgroup->numOfVnodes;
int32_t code = -1;
if (vgReplica > dbReplica) {
mInfo("vgId:%d, replica:%d numOfVnodes:%d, try remove one vnode", pVgroup->vgId, dbReplica, vgReplica);
hasUpdatingVgroup = true;
code = bnRemoveVnode(pVgroup);
} else if (vgReplica < dbReplica) {
mInfo("vgId:%d, replica:%d numOfVnodes:%d, try add one vnode", pVgroup->vgId, dbReplica, vgReplica);
hasUpdatingVgroup = true;
code = bnAddVnode(pVgroup, NULL, NULL);
}
mnodeDecVgroupRef(pVgroup);
if (code == TSDB_CODE_SUCCESS) {
mnodeCancelGetNextVgroup(pIter);
break;
}
}
return hasUpdatingVgroup;
}
static bool bnMonitorDnodeDropping(SDnodeObj *pDnode) {
mDebug("dnode:%d, in dropping state", pDnode->dnodeId);
void * pIter = NULL;
bool hasThisDnode = false;
while (1) {
SVgObj *pVgroup = NULL;
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
hasThisDnode = bnCheckDnodeInVgroup(pDnode, pVgroup);
mnodeDecVgroupRef(pVgroup);
if (hasThisDnode) {
mnodeCancelGetNextVgroup(pIter);
break;
}
}
if (!hasThisDnode) {
mInfo("dnode:%d, dropped for all vnodes are moving to other dnodes", pDnode->dnodeId);
mnodeDropDnode(pDnode, NULL);
return true;
}
return false;
}
static bool bnMontiorDropping() {
void *pIter = NULL;
SDnodeObj *pDnode = NULL;
while (1) {
mnodeDecDnodeRef(pDnode);
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
if (pDnode->lastAccess + tsOfflineThreshold > tsAccessSquence) continue;
if (dnodeIsMasterEp(pDnode->dnodeEp)) continue;
if (mnodeGetDnodesNum() <= 1) continue;
mLInfo("dnode:%d, set to removing state for it offline:%d seconds", pDnode->dnodeId,
tsAccessSquence - pDnode->lastAccess);
pDnode->status = TAOS_DN_STATUS_DROPPING;
mnodeUpdateDnode(pDnode);
mnodeDecDnodeRef(pDnode);
mnodeCancelGetNextDnode(pIter);
return true;
}
if (pDnode->status == TAOS_DN_STATUS_DROPPING) {
bool ret = bnMonitorDnodeDropping(pDnode);
mnodeDecDnodeRef(pDnode);
mnodeCancelGetNextDnode(pIter);
return ret;
}
}
return false;
}
bool bnStart() {
if (!sdbIsMaster()) return false;
bnLock();
bnAccquireDnodes();
bnMonitorDnodeModule();
bool updateSoon = bnMontiorDropping();
if (!updateSoon) {
updateSoon = bnMonitorVgroups();
}
if (!updateSoon) {
updateSoon = bnMonitorBalance();
}
bnReleaseDnodes();
bnUnLock();
return updateSoon;
}
static void bnSetVgroupOffline(SDnodeObj* pDnode) {
void *pIter = NULL;
while (1) {
SVgObj *pVgroup;
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
if (pVgroup->vnodeGid[i].pDnode == pDnode) {
pVgroup->vnodeGid[i].role = TAOS_SYNC_ROLE_OFFLINE;
}
}
mnodeDecVgroupRef(pVgroup);
}
}
void bnCheckStatus() {
void * pIter = NULL;
SDnodeObj *pDnode = NULL;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (tsAccessSquence - pDnode->lastAccess > 3) {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
bnSetVgroupOffline(pDnode);
}
}
mnodeDecDnodeRef(pDnode);
}
}
void bnCheckModules() {
if (sdbIsMaster()) {
bnLock();
bnAccquireDnodes();
bnMonitorDnodeModule();
bnReleaseDnodes();
bnUnLock();
}
}
int32_t bnInit() {
pthread_mutex_init(&tsBnMgmt.mutex, NULL);
bnInitDnodes();
bnInitThread();
bnReset();
return 0;
}
void bnCleanUp() {
bnCleanupThread();
bnCleanupDnodes();
pthread_mutex_destroy(&tsBnMgmt.mutex);
}
int32_t bnDropDnode(SDnodeObj *pDnode) {
int32_t totalFreeVnodes = 0;
void * pIter = NULL;
SDnodeObj *pTempDnode = NULL;
while (1) {
pIter = mnodeGetNextDnode(pIter, &pTempDnode);
if (pTempDnode == NULL) break;
if (pTempDnode != pDnode && bnCheckFree(pTempDnode)) {
totalFreeVnodes += (TSDB_MAX_VNODES - pTempDnode->openVnodes);
}
mnodeDecDnodeRef(pTempDnode);
}
if (pDnode->openVnodes > totalFreeVnodes) {
mError("dnode:%d, openVnodes:%d totalFreeVnodes:%d no enough dnodes", pDnode->dnodeId, pDnode->openVnodes, totalFreeVnodes);
return TSDB_CODE_MND_NO_ENOUGH_DNODES;
}
pDnode->status = TAOS_DN_STATUS_DROPPING;
mnodeUpdateDnode(pDnode);
bnStartTimer(1100);
return TSDB_CODE_SUCCESS;
}
static void bnMonitorDnodeModule() {
int32_t numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes >= tsNumOfMnodes) return;
for (int32_t i = 0; i < tsBnDnodes.size; ++i) {
SDnodeObj *pDnode = tsBnDnodes.list[i];
if (pDnode == NULL) break;
if (pDnode->isMgmt || pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) {
continue;
}
if (pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE) {
continue;
}
mLInfo("dnode:%d, numOfMnodes:%d expect:%d, create mnode in this dnode", pDnode->dnodeId, numOfMnodes, tsNumOfMnodes);
mnodeCreateMnode(pDnode->dnodeId, pDnode->dnodeEp, true);
#if 0
// Only create one mnode each time
return;
#else
numOfMnodes = mnodeGetMnodesNum();
if (numOfMnodes >= tsNumOfMnodes) return;
#endif
}
}
int32_t bnAlterDnode(struct SDnodeObj *pSrcDnode, int32_t vnodeId, int32_t dnodeId) {
if (!sdbIsMaster()) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, for self not master", pSrcDnode->dnodeId, vnodeId, dnodeId);
return TSDB_CODE_MND_DNODE_NOT_EXIST;
}
if (tsEnableBalance != 0) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, for balance enabled", pSrcDnode->dnodeId, vnodeId, dnodeId);
return TSDB_CODE_MND_BALANCE_ENABLED;
}
SVgObj *pVgroup = mnodeGetVgroup(vnodeId);
if (pVgroup == NULL) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, for vgroup not exist", pSrcDnode->dnodeId, vnodeId, dnodeId);
return TSDB_CODE_MND_VGROUP_NOT_EXIST;
}
SDnodeObj *pDestDnode = mnodeGetDnode(dnodeId);
if (pDestDnode == NULL) {
mnodeDecVgroupRef(pVgroup);
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, for dnode not exist", pSrcDnode->dnodeId, vnodeId, dnodeId);
return TSDB_CODE_MND_DNODE_NOT_EXIST;
}
bnLock();
bnAccquireDnodes();
int32_t code = TSDB_CODE_SUCCESS;
if (!bnCheckDnodeInVgroup(pSrcDnode, pVgroup)) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, vgroup not in dnode:%d", pSrcDnode->dnodeId, vnodeId,
dnodeId, pSrcDnode->dnodeId);
code = TSDB_CODE_MND_VGROUP_NOT_IN_DNODE;
} else if (bnCheckDnodeInVgroup(pDestDnode, pVgroup)) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, vgroup already in dnode:%d", pSrcDnode->dnodeId, vnodeId,
dnodeId, dnodeId);
code = TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE;
} else if (!bnCheckFree(pDestDnode)) {
mError("dnode:%d, failed to alter vgId:%d to dnode:%d, for dnode:%d not free", pSrcDnode->dnodeId, vnodeId, dnodeId,
dnodeId);
code = TSDB_CODE_MND_DNODE_NOT_FREE;
} else {
code = bnAddVnode(pVgroup, pSrcDnode, pDestDnode);
mInfo("dnode:%d, alter vgId:%d to dnode:%d, result:%s", pSrcDnode->dnodeId, vnodeId, dnodeId, tstrerror(code));
}
bnReleaseDnodes();
bnUnLock();
mnodeDecVgroupRef(pVgroup);
mnodeDecDnodeRef(pDestDnode);
return code;
}

312
src/balance/src/bnScore.c Normal file
View File

@ -0,0 +1,312 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tglobal.h"
#include "mnodeShow.h"
#include "mnodeUser.h"
#include "bnScore.h"
SBnDnodes tsBnDnodes;
static int32_t bnGetScoresMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t bnCalcCpuScore(SDnodeObj *pDnode) {
if (pDnode->cpuAvgUsage < 80)
return 0;
else if (pDnode->cpuAvgUsage < 90)
return 10;
else
return 50;
}
static int32_t bnCalcMemoryScore(SDnodeObj *pDnode) {
if (pDnode->memoryAvgUsage < 80)
return 0;
else if (pDnode->memoryAvgUsage < 90)
return 10;
else
return 50;
}
static int32_t bnCalcDiskScore(SDnodeObj *pDnode) {
if (pDnode->diskAvgUsage < 80)
return 0;
else if (pDnode->diskAvgUsage < 90)
return 10;
else
return 50;
}
static int32_t bnCalcBandScore(SDnodeObj *pDnode) {
if (pDnode->bandwidthUsage < 30)
return 0;
else if (pDnode->bandwidthUsage < 80)
return 10;
else
return 50;
}
static float bnCalcModuleScore(SDnodeObj *pDnode) {
if (pDnode->numOfCores <= 0) return 0;
if (pDnode->isMgmt) {
return (float)tsMnodeEqualVnodeNum / pDnode->numOfCores;
}
return 0;
}
static float bnCalcVnodeScore(SDnodeObj *pDnode, int32_t extra) {
if (pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) return 100000000;
if (pDnode->numOfCores <= 0) return 0;
return (float)(pDnode->openVnodes + extra) / pDnode->numOfCores;
}
/**
* calc single score, such as cpu/memory/disk/bandwidth/vnode
* 1. get the score config
* 2. if the value is out of range, use border data
* 3. otherwise use interpolation method
* (a worked example of the combined score follows bnTryCalcDnodeScore below)
**/
static void bnCalcDnodeScore(SDnodeObj *pDnode) {
pDnode->score = bnCalcCpuScore(pDnode) + bnCalcMemoryScore(pDnode) + bnCalcDiskScore(pDnode) +
bnCalcBandScore(pDnode) + bnCalcModuleScore(pDnode) + bnCalcVnodeScore(pDnode, 0) +
pDnode->customScore;
}
float bnTryCalcDnodeScore(SDnodeObj *pDnode, int32_t extra) {
int32_t systemScore = bnCalcCpuScore(pDnode) + bnCalcMemoryScore(pDnode) + bnCalcDiskScore(pDnode) +
bnCalcBandScore(pDnode);
float moduleScore = bnCalcModuleScore(pDnode);
float vnodeScore = bnCalcVnodeScore(pDnode, extra);
float score = systemScore + moduleScore + vnodeScore + pDnode->customScore;
return score;
}
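A self-contained sketch of how the pieces above combine into the balancing score; FakeDnode, usageScore, and bandScore are simplified stand-ins for SDnodeObj and the helpers (not the real types), and the numbers are an assumed example:

#include <stdint.h>
#include <stdio.h>

// Simplified stand-in for the SDnodeObj fields the score functions read.
typedef struct {
  float   cpuAvgUsage, memoryAvgUsage, diskAvgUsage, bandwidthUsage;
  float   customScore;
  int32_t numOfCores, openVnodes;
} FakeDnode;

static int32_t usageScore(float usage) {  // mirrors the 80/90 thresholds above
  return usage < 80 ? 0 : (usage < 90 ? 10 : 50);
}

static int32_t bandScore(float usage) {   // mirrors the 30/80 thresholds above
  return usage < 30 ? 0 : (usage < 80 ? 10 : 50);
}

int main(void) {
  FakeDnode d = {.cpuAvgUsage = 85, .memoryAvgUsage = 50, .diskAvgUsage = 92,
                 .bandwidthUsage = 20, .customScore = 0, .numOfCores = 8, .openVnodes = 16};
  int32_t system = usageScore(d.cpuAvgUsage) + usageScore(d.memoryAvgUsage) +
                   usageScore(d.diskAvgUsage) + bandScore(d.bandwidthUsage);
  float vnode = (float)(d.openVnodes + 0) / d.numOfCores;  // extra vnodes = 0
  printf("system:%d vnode:%.2f total:%.2f\n", system, vnode, system + vnode + d.customScore);
  return 0;  // prints: system:60 vnode:2.00 total:62.00
}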
void bnInitDnodes() {
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_SCORES, bnGetScoresMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_SCORES, bnRetrieveScores);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_SCORES, mnodeCancelGetNextDnode);
memset(&tsBnDnodes, 0, sizeof(SBnDnodes));
tsBnDnodes.maxSize = 16;
tsBnDnodes.list = calloc(tsBnDnodes.maxSize, sizeof(SDnodeObj *));
}
void bnCleanupDnodes() {
if (tsBnDnodes.list != NULL) {
free(tsBnDnodes.list);
tsBnDnodes.list = NULL;
}
}
static void bnCheckDnodesSize(int32_t dnodesNum) {
if (tsBnDnodes.maxSize <= dnodesNum) {
tsBnDnodes.maxSize = dnodesNum * 2;
tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *));
}
}
void bnAccquireDnodes() {
int32_t dnodesNum = mnodeGetDnodesNum();
bnCheckDnodesSize(dnodesNum);
void * pIter = NULL;
SDnodeObj *pDnode = NULL;
int32_t dnodeIndex = 0;
while (1) {
if (dnodeIndex >= dnodesNum) {
mnodeCancelGetNextDnode(pIter);
break;
}
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
mnodeDecDnodeRef(pDnode);
continue;
}
bnCalcDnodeScore(pDnode);
int32_t orderIndex = dnodeIndex;
for (; orderIndex > 0; --orderIndex) {
if (pDnode->score > tsBnDnodes.list[orderIndex - 1]->score) {
break;
}
tsBnDnodes.list[orderIndex] = tsBnDnodes.list[orderIndex - 1];
}
tsBnDnodes.list[orderIndex] = pDnode;
dnodeIndex++;
}
tsBnDnodes.size = dnodeIndex;
}
void bnReleaseDnodes() {
for (int32_t i = 0; i < tsBnDnodes.size; ++i) {
SDnodeObj *pDnode = tsBnDnodes.list[i];
if (pDnode != NULL) {
mnodeDecDnodeRef(pDnode);
}
}
}
static int32_t bnGetScoresMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
SUserObj *pUser = mnodeGetUserFromConn(pConn);
if (pUser == NULL) return 0;
if (strcmp(pUser->pAcct->user, "root") != 0) {
mnodeDecUserRef(pUser);
return TSDB_CODE_MND_NO_RIGHTS;
}
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
pShow->bytes[cols] = 2;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
strcpy(pSchema[cols].name, "id");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_FLOAT;
strcpy(pSchema[cols].name, "system scores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_FLOAT;
strcpy(pSchema[cols].name, "custom scores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_FLOAT;
strcpy(pSchema[cols].name, "module scores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_FLOAT;
strcpy(pSchema[cols].name, "vnode scores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_FLOAT;
strcpy(pSchema[cols].name, "total scores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "open vnodes");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "cpu cores");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 18 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "balance state");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pMeta->numOfColumns = htons(cols);
pShow->numOfColumns = cols;
pShow->offset[0] = 0;
for (int32_t i = 1; i < cols; ++i) {
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
}
pShow->numOfRows = mnodeGetDnodesNum();
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
pShow->pIter = NULL;
mnodeDecUserRef(pUser);
return 0;
}
static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0;
SDnodeObj *pDnode = NULL;
char * pWrite;
int32_t cols = 0;
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextDnode(pShow->pIter, &pDnode);
if (pDnode == NULL) break;
int32_t systemScore = bnCalcCpuScore(pDnode) + bnCalcMemoryScore(pDnode) + bnCalcDiskScore(pDnode) + bnCalcBandScore(pDnode);
float moduleScore = bnCalcModuleScore(pDnode);
float vnodeScore = bnCalcVnodeScore(pDnode, 0);
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int16_t *)pWrite = pDnode->dnodeId;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = systemScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = pDnode->customScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)moduleScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)vnodeScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pDnode->openVnodes;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = pDnode->numOfCores;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_TO_VARSTR(pWrite, mnodeGetDnodeStatusStr(pDnode->status));
cols++;
numOfRows++;
mnodeDecDnodeRef(pDnode);
}
mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
pShow->numOfReads += numOfRows;
return numOfRows;
}
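The retrieve function above fills the result buffer column by column: column c occupies a contiguous block starting at pShow->offset[c] * rows, and a row's cell sits bytes[c] * row into that block. A small self-contained sketch of that addressing with made-up column widths (the real code casts and writes in place; memcpy is used here only to sidestep alignment concerns):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  // Two columns: an int16 id (2 bytes) and a float score (4 bytes), room for 4 rows.
  int32_t bytes[2]  = {2, 4};
  int32_t offset[2] = {0, 2};            // offset[c] = offset[c-1] + bytes[c-1]
  int32_t rows = 4;
  char    data[(2 + 4) * 4] = {0};

  int32_t row = 1;                       // write the second row
  int16_t id = 7;
  float   score = 62.0f;
  memcpy(data + offset[0] * rows + bytes[0] * row, &id, sizeof(id));
  memcpy(data + offset[1] * rows + bytes[1] * row, &score, sizeof(score));

  int16_t idOut;
  float   scoreOut;
  memcpy(&idOut, data + offset[0] * rows + bytes[0] * row, sizeof(idOut));
  memcpy(&scoreOut, data + offset[1] * rows + bytes[1] * row, sizeof(scoreOut));
  printf("id:%d score:%.1f\n", idOut, scoreOut);  // id:7 score:62.0
  return 0;
}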

132
src/balance/src/bnThread.c Normal file
View File

@ -0,0 +1,132 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "ttimer.h"
#include "tglobal.h"
#include "mnodeSdb.h"
#include "bnThread.h"
static SBnThread tsBnThread;
static void *bnThreadFunc(void *arg) {
while (1) {
pthread_mutex_lock(&tsBnThread.mutex);
if (tsBnThread.stop) {
pthread_mutex_unlock(&tsBnThread.mutex);
break;
}
pthread_cond_wait(&tsBnThread.cond, &tsBnThread.mutex);
bool updateSoon = bnStart();
bnStartTimer(updateSoon ? 1000 : -1);
pthread_mutex_unlock(&(tsBnThread.mutex));
}
mDebug("balance thread is stopped");
return NULL;
}
int32_t bnInitThread() {
memset(&tsBnThread, 0, sizeof(SBnThread));
tsBnThread.stop = false;
pthread_mutex_init(&tsBnThread.mutex, NULL);
pthread_cond_init(&tsBnThread.cond, NULL);
pthread_attr_t thattr;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
int32_t ret = pthread_create(&tsBnThread.thread, &thattr, bnThreadFunc, NULL);
pthread_attr_destroy(&thattr);
if (ret != 0) {
mError("failed to create balance thread since %s", strerror(errno));
return -1;
}
bnStartTimer(2000);
mDebug("balance thread is created");
return 0;
}
void bnCleanupThread() {
mDebug("balance thread will be cleanup");
if (tsBnThread.timer != NULL) {
taosTmrStopA(&tsBnThread.timer);
tsBnThread.timer = NULL;
mDebug("stop balance timer");
}
pthread_mutex_lock(&tsBnThread.mutex);
tsBnThread.stop = true;
pthread_cond_signal(&tsBnThread.cond);
pthread_mutex_unlock(&(tsBnThread.mutex));
pthread_join(tsBnThread.thread, NULL);
pthread_cond_destroy(&tsBnThread.cond);
pthread_mutex_destroy(&tsBnThread.mutex);
}
static void bnPostSignal() {
if (tsBnThread.stop) return;
pthread_mutex_lock(&tsBnThread.mutex);
pthread_cond_signal(&tsBnThread.cond);
pthread_mutex_unlock(&(tsBnThread.mutex));
}
/*
* once sdb works as master, tsAccessSquence is reset to zero
* tsAccessSquence is increased every balance interval
*/
static void bnProcessTimer(void *handle, void *tmrId) {
if (!sdbIsMaster()) return;
if (tsBnThread.stop) return;
tsBnThread.timer = NULL;
tsAccessSquence++;
bnCheckStatus();
bnStartTimer(-1);
if (handle == NULL) {
if (tsAccessSquence % tsBalanceInterval == 0) {
mDebug("balance function is scheduled by timer");
bnPostSignal();
}
} else {
int64_t mseconds = (int64_t)handle;
mDebug("balance function is scheduled by event for %" PRId64 " mseconds arrived", mseconds);
bnPostSignal();
}
}
void bnStartTimer(int64_t mseconds) {
if (tsBnThread.stop) return;
bool updateSoon = (mseconds != -1);
if (updateSoon) {
taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {
taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);
}
}
void bnNotify() {
bnStartTimer(500);
}

View File

@ -56,17 +56,16 @@ typedef struct SLocalReducer {
tFilePage * pTempBuffer;
struct SQLFunctionCtx *pCtx;
int32_t rowSize; // size of each intermediate result.
int32_t finalRowSize; // final result row size
int32_t status; // denote it is in reduce process, in reduce process, it
bool hasPrevRow; // cannot be released
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;
SColumnModel * resColModel;
SColumnModel* finalModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SFillInfo* pFillInfo; // interpolation support structure
char * pFinalRes; // result data after interpo
tFilePage * discardData;
SResultInfo * pResInfo;
char* pFinalRes; // result data after interpo
tFilePage* discardData;
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
@ -75,7 +74,8 @@ typedef struct SLocalReducer {
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor;
SColumnModel * pFinalColModel; // colModel for final result
SColumnModel* pFinalColModel; // colModel for final result
SColumnModel* pFFColModel;
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSql;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
@ -83,9 +83,9 @@ typedef struct SRetrieveSupport {
} SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
SColumnModel **pFinalModel, uint32_t nBufferSize);
SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSize);
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel* pFFModel,
int32_t numOfVnodes);
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
@ -97,7 +97,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SSqlObj* pSql);
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);

View File

@ -23,7 +23,7 @@ extern "C" {
#include "tscUtil.h"
#include "tsclient.h"
void tscFetchDatablockFromSubquery(SSqlObj* pSql);
void tscFetchDatablockForSubquery(SSqlObj* pSql);
void tscSetupOutputColumnIndex(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
@ -39,7 +39,9 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql);
int32_t tscHandleInsertRetry(SSqlObj* pSql);
void tscBuildResFromSubqueries(SSqlObj *pSql);
void **doSetResultRowData(SSqlObj *pSql, bool finalResult);
TAOS_ROW doSetResultRowData(SSqlObj *pSql);
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId);
#ifdef __cplusplus
}

View File

@ -75,6 +75,7 @@ typedef struct SJoinSupporter {
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupInfo; // group by info
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
@ -82,15 +83,16 @@ typedef struct SJoinSupporter {
char* pIdTagList; // result of first stage tags
int32_t totalLen;
int32_t num;
SArray* pVgroupTables;
} SJoinSupporter;
typedef struct SVgroupTableInfo {
SCMVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
SVgroupInfo vgInfo;
SArray* itemList; //SArray<STableIdInfo>
} SVgroupTableInfo;
static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE);
assert(pCmd != NULL && subClauseIndex >= 0);
if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) {
return NULL;
@ -123,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
@ -149,15 +152,14 @@ int tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
SFieldSupInfo* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField);
SFieldSupInfo* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field);
SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField);
SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIELD* field);
SFieldSupInfo* tscFieldInfoGetSupp(SFieldInfo* pFieldInfo, int32_t index);
SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index);
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoCopy(SFieldInfo* dst, const SFieldInfo* src);
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
@ -166,15 +168,15 @@ static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
int32_t tscGetResRowLength(SArray* pExprList);
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
@ -216,7 +218,7 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols);
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@ -225,16 +227,20 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex);
int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo);
int tscGetMeterMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists);
void tscResetForNextRetrieve(SSqlRes* pRes);
void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex);
void tscDoQuery(SSqlObj* pSql);
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src);
/**
* The create object function must be successful expect for the out of memory issue.
*
@ -262,6 +268,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t sub
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
@ -271,10 +278,11 @@ bool hasMoreClauseToTry(SSqlObj* pSql);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
int tscSetMgmtEpSetFromCfg(const char *first, const char *second);
int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corEpSet);
bool tscSetSqlOwner(SSqlObj* pSql);
void tscClearSqlOwner(SSqlObj* pSql);
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);

View File

@ -77,7 +77,7 @@ SSchema *tscGetTableColumnSchema(const STableMeta *pMeta, int32_t colIndex);
* @param colId
* @return
*/
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId);
/**
* check if the schema is valid or not, including following aspects:
@ -107,9 +107,6 @@ SSchema tscGetTbnameColumnSchema();
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size);
//todo tags value as well as the table id structure needs refactor
char *tsGetTagsValue(STableMeta *pMeta);
#ifdef __cplusplus
}
#endif

View File

@ -30,6 +30,7 @@ extern "C" {
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "tref.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@ -89,12 +90,12 @@ typedef struct STableComInfo {
int32_t rowSize;
} STableComInfo;
typedef struct SCMCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t numOfEps;
SEpAddr epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo;
typedef struct SCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCorVgroupInfo;
typedef struct STableMeta {
STableComInfo tableInfo;
@ -102,8 +103,8 @@ typedef struct STableMeta {
int16_t sversion;
int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo;
SVgroupInfo vgroupInfo;
SCorVgroupInfo corVgroupInfo;
STableId id;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
@ -127,7 +128,7 @@ typedef struct STableMetaInfo {
typedef struct SSqlExpr {
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
SColIndex colInfo;
int64_t uid; // refactor use the pointer
uint64_t uid; // refactor use the pointer
int16_t functionId; // function id in aAgg array
int16_t resType; // return value type
int16_t resBytes; // length of return value
@ -135,6 +136,7 @@ typedef struct SSqlExpr {
int16_t numOfParams; // argument value of each function
tVariant param[3]; // parameters are not more than 3
int32_t offset; // sub result column value of arithmetic expression.
int16_t resColId; // result column id
} SSqlExpr;
typedef struct SColumnIndex {
@ -142,16 +144,17 @@ typedef struct SColumnIndex {
int16_t columnIndex;
} SColumnIndex;
typedef struct SFieldSupInfo {
typedef struct SInternalField {
TAOS_FIELD field;
bool visible;
SExprInfo *pArithExprInfo;
SSqlExpr *pSqlExpr;
} SFieldSupInfo;
} SInternalField;
typedef struct SFieldInfo {
int16_t numOfOutput; // number of column in result
SArray *pFields; // SArray<TAOS_FIELD>
SArray *pSupportInfo; // SArray<SFieldSupInfo>
int16_t numOfOutput; // number of column in result
TAOS_FIELD* final;
SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
@ -243,12 +246,16 @@ typedef struct SQueryInfo {
int16_t fillType; // final result fill type
int16_t numOfTables;
STableMetaInfo **pTableMetaInfo;
struct STSBuf * tsBuf;
struct STSBuf *tsBuf;
int64_t * fillVal; // default value for fill
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
int64_t tableLimit; // table limit in case of super table projection query + global order + limit
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
} SQueryInfo;
typedef struct {
@ -278,7 +285,7 @@ typedef struct {
int8_t dataSourceType; // load data from file or not
int8_t submitSchema; // submit block is built with table schema
STagData tagData;
STagData *pTagData; // NOTE: pTagData->data is used as a variant length array
SHashObj *pTableList; // referred table involved in sql
SArray *pDataBlocks; // SArray<STableDataBlocks*> submit data blocks after parsing sql
} SSqlCmd;
@ -289,32 +296,32 @@ typedef struct SResRec {
} SResRec;
typedef struct {
int64_t numOfRows; // num of results in current retrieved
int64_t numOfRowsGroup; // num of results of current group
int64_t numOfTotal; // num of total results
int64_t numOfClauseTotal; // num of total result in current subclause
char * pRsp;
int32_t rspType;
int32_t rspLen;
uint64_t qhandle;
int64_t uid;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
int16_t numOfCols;
int16_t precision;
bool completed;
int32_t code;
int32_t numOfGroups;
SResRec * pGroupRec;
char * data;
void ** tsrow;
int32_t* length; // length for each field for current row
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex * pColumnIndex;
SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions
int32_t numOfRows; // num of results in current retrieval
int64_t numOfRowsGroup; // num of results of current group
int64_t numOfTotal; // num of total results
int64_t numOfClauseTotal; // num of total result in current subclause
char * pRsp;
int32_t rspType;
int32_t rspLen;
uint64_t qhandle;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int32_t row;
int16_t numOfCols;
int16_t precision;
bool completed;
int32_t code;
int32_t numOfGroups;
SResRec * pGroupRec;
char * data;
TAOS_ROW tsrow;
TAOS_ROW urow;
int32_t* length; // length for each field for current row
char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t)
SColumnIndex* pColumnIndex;
struct SLocalReducer *pLocalReducer;
SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalReducer* pLocalReducer;
} SSqlRes;
typedef struct STscObj {
@ -328,25 +335,27 @@ typedef struct STscObj {
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
struct SSqlObj * pHb;
uint64_t rid; // ref ID returned by taosAddRef
int64_t hbrid;
struct SSqlObj * sqlList;
struct SSqlStream *streamList;
SRpcCorEpSet *tscCorMgmtEpSet;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE()
} STscObj;
typedef struct SSubqueryState {
int32_t numOfRemain; // the number of remain unfinished subquery
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
int32_t numOfRemain; // the number of remain unfinished subquery
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState;
typedef struct SSqlObj {
void *signature;
pthread_t owner; // owner of sql object, by which it is executed
STscObj *pTscObj;
void *pRpcCtx;
int64_t rpcRid;
void (*fp)();
void (*fetchFp)();
void *param;
@ -368,7 +377,7 @@ typedef struct SSqlObj {
struct SSqlObj **pSubs;
struct SSqlObj *prev, *next;
struct SSqlObj **self;
int64_t self;
} SSqlObj;
typedef struct SSqlStream {
@ -420,6 +429,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
@ -429,28 +439,20 @@ void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
*/
void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/**
* free sql object, release allocated resource
* @param pObj
*/
void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeRegisteredSqlObj(void *pSql);
void tscFreeTableMetaHelper(void *pTableMeta);
void tscCloseTscObj(STscObj *pObj);
void tscCloseTscObj(void *pObj);
// todo move to taos? or create a new file: taos_internal.h
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos);
void *param, TAOS **taos);
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
@ -467,25 +469,25 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pFieldInfo->pSupportInfo, columnIndex);
assert(pInfo->pSqlExpr != NULL);
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
int32_t type = pInfo->field.type;
int32_t bytes = pInfo->field.bytes;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
UNUSED(pData);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
// user defined constant value output columns
if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
pData = pInfo->pSqlExpr->param[1].pz;
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : pData;
pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
} else {
assert(bytes == tDataTypeDesc[type].nSize);
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : &pInfo->pSqlExpr->param[1].i64Key;
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64Key;
pRes->length[columnIndex] = bytes;
}
} else {
@ -493,7 +495,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t realLen = varDataLen(pData);
assert(realLen <= bytes - VARSTR_HEADER_SIZE);
pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : ((tstr *)pData)->data;
pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : (unsigned char*)((tstr *)pData)->data;
if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
*(pData + realLen + VARSTR_HEADER_SIZE) = 0;
}
@ -502,26 +504,26 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
} else {
assert(bytes == tDataTypeDesc[type].nSize);
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : pData;
pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
pRes->length[columnIndex] = bytes;
}
}
}
extern SCacheObj* tscMetaCache;
extern SCacheObj* tscObjCache;
extern int tscObjRef;
extern void * tscTmr;
extern void * tscQhandle;
extern int tscKeepConn[];
extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern int tscRefId;
extern SRpcCorEpSet tscMgmtEpSet;
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
int32_t tscCompareTidTags(const void* p1, const void* p2);
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
int16_t getNewResColId(SQueryInfo* pQueryInfo);
#ifdef __cplusplus
}

View File

@ -129,6 +129,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp
(JNIEnv *, jobject, jlong, jlong, jobject);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: fetchBlockImp
* Signature: (JJLcom/taosdata/jdbc/TSDBResultSetBlockData;)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp
(JNIEnv *, jobject, jlong, jlong, jobject);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
* Method: closeConnectionImp

View File

@ -17,7 +17,6 @@
#include "taos.h"
#include "tlog.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "com_taosdata_jdbc_TSDBJNIConnector.h"
@ -57,6 +56,10 @@ jmethodID g_rowdataSetStringFp;
jmethodID g_rowdataSetTimestampFp;
jmethodID g_rowdataSetByteArrayFp;
jmethodID g_blockdataSetByteArrayFp;
jmethodID g_blockdataSetNumOfRowsFp;
jmethodID g_blockdataSetNumOfColsFp;
#define JNI_SUCCESS 0
#define JNI_TDENGINE_ERROR -1
#define JNI_CONNECTION_NULL -2
@ -66,7 +69,7 @@ jmethodID g_rowdataSetByteArrayFp;
#define JNI_FETCH_END -6
#define JNI_OUT_OF_MEMORY -7
void jniGetGlobalMethod(JNIEnv *env) {
static void jniGetGlobalMethod(JNIEnv *env) {
// make sure init function executed once
switch (atomic_val_compare_exchange_32(&__init, 0, 1)) {
case 0:
@ -114,10 +117,31 @@ void jniGetGlobalMethod(JNIEnv *env) {
g_rowdataSetByteArrayFp = (*env)->GetMethodID(env, g_rowdataClass, "setByteArray", "(I[B)V");
(*env)->DeleteLocalRef(env, rowdataClass);
jclass blockdataClass = (*env)->FindClass(env, "com/taosdata/jdbc/TSDBResultSetBlockData");
jclass g_blockdataClass = (*env)->NewGlobalRef(env, blockdataClass);
g_blockdataSetByteArrayFp = (*env)->GetMethodID(env, g_blockdataClass, "setByteArray", "(II[B)V");
g_blockdataSetNumOfRowsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfRows", "(I)V");
g_blockdataSetNumOfColsFp = (*env)->GetMethodID(env, g_blockdataClass, "setNumOfCols", "(I)V");
(*env)->DeleteLocalRef(env, blockdataClass);
atomic_store_32(&__init, 2);
jniDebug("native method register finished");
}
static int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
if ((TAOS*) conn == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((TAOS_RES *) res == NULL) {
jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS*) conn);
return JNI_RESULT_SET_NULL;
}
return JNI_SUCCESS;
}
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode,
jstring jPath, jboolean jAutoDump) {
if (jPath != NULL) {
@ -192,39 +216,37 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEnv *env, jobject jobj, jstring jhost,
jint jport, jstring jdbName, jstring juser,
jstring jpass) {
jlong ret = 0;
jlong ret = 0;
const char *host = NULL;
const char *dbname = NULL;
const char *user = NULL;
const char *pass = NULL;
const char *dbname = NULL;
if (jhost != NULL) {
host = (*env)->GetStringUTFChars(env, jhost, NULL);
}
if (jdbName != NULL) {
dbname = (*env)->GetStringUTFChars(env, jdbName, NULL);
}
if (juser != NULL) {
user = (*env)->GetStringUTFChars(env, juser, NULL);
}
if (jpass != NULL) {
pass = (*env)->GetStringUTFChars(env, jpass, NULL);
}
if (user == NULL) {
jniDebug("jobj:%p, user is null, use default user %s", jobj, TSDB_DEFAULT_USER);
jniDebug("jobj:%p, user not specified, use default user %s", jobj, TSDB_DEFAULT_USER);
}
if (pass == NULL) {
jniDebug("jobj:%p, pass is null, use default password", jobj);
jniDebug("jobj:%p, pass not specified, use default password", jobj);
}
/*
* set numOfThreadsPerCore = 0
* means only one thread for client side scheduler
*/
tsNumOfThreadsPerCore = 0.0;
ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
ret = (jlong) taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
@ -233,10 +255,21 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
}
if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host);
if (dbname != NULL) (*env)->ReleaseStringUTFChars(env, jdbName, dbname);
if (user != NULL) (*env)->ReleaseStringUTFChars(env, juser, user);
if (pass != NULL) (*env)->ReleaseStringUTFChars(env, jpass, pass);
if (host != NULL) {
(*env)->ReleaseStringUTFChars(env, jhost, host);
}
if (dbname != NULL) {
(*env)->ReleaseStringUTFChars(env, jdbName, dbname);
}
if (user != NULL) {
(*env)->ReleaseStringUTFChars(env, juser, user);
}
if (pass != NULL) {
(*env)->ReleaseStringUTFChars(env, jpass, pass);
}
return ret;
}
@ -245,64 +278,53 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is already closed", jobj);
jniError("jobj:%p, connection already closed", jobj);
return JNI_CONNECTION_NULL;
}
if (jsql == NULL) {
jniError("jobj:%p, conn:%p, sql is null", jobj, tscon);
jniError("jobj:%p, conn:%p, empty sql string", jobj, tscon);
return JNI_SQL_NULL;
}
jsize len = (*env)->GetArrayLength(env, jsql);
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
if (dst == NULL) {
jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon);
char *str = (char *) calloc(1, sizeof(char) * (len + 1));
if (str == NULL) {
jniError("jobj:%p, conn:%p, alloc memory failed", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
jniDebug("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);
SSqlObj *pSql = taos_query(tscon, dst);
SSqlObj *pSql = taos_query(tscon, str);
int32_t code = taos_errno(pSql);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, tscon, tstrerror(code), taos_errstr(pSql));
} else {
int32_t affectRows = 0;
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(pSql);
int32_t affectRows = taos_affected_rows(pSql);
jniDebug("jobj:%p, conn:%p, code:%s, affect rows:%d", jobj, tscon, tstrerror(code), affectRows);
} else {
jniDebug("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
}
}
free(dst);
return (jlong)pSql;
free(str);
return (jlong) pSql;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return (jint)TSDB_CODE_TSC_INVALID_CONNECTION;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
TAOS_RES *pSql = (TAOS_RES *)tres;
return (jint)taos_errno(pSql);
return (jint)taos_errno((TAOS_RES*) tres);
}
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) {
@ -313,23 +335,16 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
SSqlObj *pSql = (TAOS_RES *)tres;
STscObj *pObj = pSql->pTscObj;
if (tscIsUpdateQuery(pSql)) {
jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, pObj, (void *)tres);
jniDebug("jobj:%p, conn:%p, update query, no resultset, %p", jobj, tscon, (void *)tres);
} else {
jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, pObj, (void *)tres);
jniDebug("jobj:%p, conn:%p, get resultset, %p", jobj, tscon, (void *)tres);
}
return tres;
@ -337,15 +352,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
}
SSqlObj *pSql = (TAOS_RES *)tres;
@ -355,37 +364,27 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_isUpdateQueryImp(
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)res == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
taos_free_result((void *)res);
jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, tscon, (void *)res);
jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS*) con, (void *)res);
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}
if ((void *)res == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
jint ret = taos_affected_rows((SSqlObj *)res);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res, (int32_t)ret);
return ret;
}
@ -394,27 +393,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
jlong con, jlong res,
jobject arrayListObj) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
TAOS_RES *result = (TAOS_RES *)res;
if (result == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
TAOS_FIELD *fields = taos_fetch_fields(result);
int num_fields = taos_num_fields(result);
// jobject arrayListObj = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp, "");
TAOS_RES* tres = (TAOS_RES*) res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t num_fields = taos_num_fields(tres);
if (num_fields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields);
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields);
return JNI_NUM_OF_FIELDS_0;
} else {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void *)res, num_fields);
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, num_fields);
for (int i = 0; i < num_fields; ++i) {
jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp);
(*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type);
@ -457,21 +449,21 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
}
TAOS_FIELD *fields = taos_fetch_fields(result);
int num_fields = taos_num_fields(result);
if (num_fields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, (void*)res, num_fields);
int32_t numOfFields = taos_num_fields(result);
if (numOfFields == 0) {
jniError("jobj:%p, conn:%p, resultset:%p, fields size %d", jobj, tscon, (void*)res, numOfFields);
return JNI_NUM_OF_FIELDS_0;
}
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
int tserrno = taos_errno(result);
if (tserrno == 0) {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, num_fields);
int code = taos_errno(result);
if (code == TSDB_CODE_SUCCESS) {
jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, (void*)res, numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, interruptted query", jobj, tscon);
jniDebug("jobj:%p, conn:%p, interrupted query", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
}
@ -480,7 +472,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
for (int i = 0; i < num_fields; i++) {
for (int i = 0; i < numOfFields; i++) {
if (row[i] == NULL) {
continue;
}
@ -534,6 +526,45 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
return JNI_SUCCESS;
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con,
jlong res, jobject rowobj) {
TAOS * tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
TAOS_RES * tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t numOfFields = taos_num_fields(tres);
assert(numOfFields > 0);
TAOS_ROW row = NULL;
int32_t numOfRows = taos_fetch_block(tres, &row);
if (numOfRows == 0) {
code = taos_errno(tres);
if (code == JNI_SUCCESS) {
jniDebug("jobj:%p, conn:%p, resultset:%p, numOfFields:%d, no data to retrieve", jobj, tscon, (void *)res,
numOfFields);
return JNI_FETCH_END;
} else {
jniDebug("jobj:%p, conn:%p, query interrupted", jobj, tscon);
return JNI_RESULT_SET_NULL;
}
}
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfRowsFp, (jint)numOfRows);
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfColsFp, (jint)numOfFields);
for (int i = 0; i < numOfFields; i++) {
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, i, fields[i].bytes * numOfRows,
jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes * numOfRows));
}
return JNI_SUCCESS;
}
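fetchBlockImp relies on taos_fetch_block returning the rows in columnar form: row[i] points at a contiguous slice of fields[i].bytes * numOfRows bytes holding column i for every row, which is why each column can be handed to Java as a single byte array. A standalone sketch of walking such a slice on the C side (walkColumn and its arguments are illustrative, not part of the patch):

static void walkColumn(TAOS_ROW row, TAOS_FIELD *fields, int col, int numOfRows) {
  for (int r = 0; r < numOfRows; ++r) {
    // cell r of column col starts r * fields[col].bytes into the column slice
    const char *cell = (const char *)row[col] + (size_t)r * fields[col].bytes;
    (void)cell;  // decode according to fields[col].type
  }
}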
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionImp(JNIEnv *env, jobject jobj,
jlong con) {
TAOS *tscon = (TAOS *)con;
@ -589,7 +620,6 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEn
jniGetGlobalMethod(env);
TAOS_SUB *tsub = (TAOS_SUB *)sub;
TAOS_RES *res = taos_consume(tsub);
if (res == NULL) {
@ -621,16 +651,16 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
jsize len = (*env)->GetArrayLength(env, jsql);
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
char *str = (char *)calloc(1, sizeof(char) * (len + 1));
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)str);
if ((*env)->ExceptionCheck(env)) {
// todo handle error
}
int code = taos_validate_sql(tscon, dst);
int code = taos_validate_sql(tscon, str);
jniDebug("jobj:%p, conn:%p, code is %d", jobj, tscon, code);
free(dst);
free(str);
return code;
}

@ -91,8 +91,8 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
int32_t sqlLen = (int32_t)strlen(sqlstr);
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_TSC_INVALID_SQL;
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_INVALID_SQL);
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
tscQueueAsyncError(fp, param, terrno);
return;
}
@ -176,7 +176,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
}
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
tscFetchDatablockForSubquery(pSql);
} else {
tscProcessSql(pSql);
}
@ -226,7 +226,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
// handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
tscFetchDatablockForSubquery(pSql);
} else if (pRes->completed) {
if(pCmd->command == TSDB_SQL_FETCH || (pCmd->command >= TSDB_SQL_SERV_STATUS && pCmd->command <= TSDB_SQL_CURRENT_USER)) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
@ -327,7 +327,7 @@ void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
}
for (int i = 0; i < pCmd->numOfCols; ++i){
SFieldSupInfo* pSup = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i);
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i) + pSup->pSqlExpr->resBytes * pRes->row;
} else {
@ -348,10 +348,10 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < pCmd->numOfCols; ++i) {
SFieldSupInfo* pSup = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i);
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
} else {
// todo add
}
@ -405,11 +405,12 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlRes *pRes = &pSql->res;
pRes->code = code;
SSqlObj *sub = (SSqlObj*) res;
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
if (code != TSDB_CODE_SUCCESS) {
tscError("%p get tableMeta failed, code:%s", pSql, tstrerror(code));
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
goto _error;
} else {
const char* msg = (pCmd->command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
tscDebug("%p get %s successfully", pSql, msg);
}
@ -428,7 +429,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0) && pSql->param != NULL);
// param has already been freed by another routine and pSql is in tscCache when Ctrl+C is pressed
if (atomic_load_ptr(&pSql->param) == NULL) {
return;
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSql;
@ -437,6 +442,20 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex &&
pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL);
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else {
assert(code == TSDB_CODE_SUCCESS);
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
// tscProcessSql can add error into async res
tscProcessSql(pSql);
return;
@ -461,7 +480,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
tscResetSqlCmdObj(pCmd, false);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {

File diff suppressed because it is too large
@ -49,82 +49,6 @@ typedef struct SCreateBuilder {
} SCreateBuilder;
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
char buf[512] = {0};
int32_t len = 0;
int32_t MAX_BOOL_TYPE_LENGTH = 5; // max(strlen("true"), strlen("false"));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
return length;
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
break;
default:
len = sprintf(buf, "%d", *(int32_t *)pData);
break;
};
return len;
}
/*
* we need to convert all data into string, so we need to sprintf all kinds of
* non-string data into string, and record its length to get the right
* maximum length. The length may be less or greater than its original binary length:
* For example:
* length((short) 1) == 1, less than sizeof(short)
* length((uint64_t) 123456789011) > 12, greater than sizsof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
return 0;
}
char * pTagValue = tsGetTagsValue(pMeta);
SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
int32_t len = getToStringLength(pTagValue, pTagsSchema[0].bytes, pTagsSchema[0].type);
pTagValue += pTagsSchema[0].bytes;
int32_t numOfTags = tscGetNumOfTags(pMeta);
for (int32_t i = 1; i < numOfTags; ++i) {
int32_t tLen = getToStringLength(pTagValue, pTagsSchema[i].bytes, pTagsSchema[i].type);
if (len < tLen) {
len = tLen;
}
pTagValue += pTagsSchema[i].bytes;
}
return len;
}
static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
@ -186,8 +110,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
return 0;
}
// the following is handle display tags value for meters created according to metric
char *pTagValue = tsGetTagsValue(pMeta);
// the following handles displaying tag values for tables created from a super table
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
@ -219,8 +142,6 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
const char *src = "TAG";
STR_WITH_MAXSIZE_TO_VARSTR(target, src, pField->bytes);
pTagValue += pSchema[i].bytes;
}
return 0;
@ -239,9 +160,9 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE};
tstrncpy(f.name, "Field", sizeof(f.name));
SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false);
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
@ -251,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
typeColLength, false);
-1000, typeColLength, false);
rowLen += typeColLength + VARSTR_HEADER_SIZE;
@ -261,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
sizeof(int32_t), false);
-1000, sizeof(int32_t), false);
rowLen += sizeof(int32_t);
@ -271,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
noteColLength, false);
-1000, noteColLength, false);
rowLen += noteColLength + VARSTR_HEADER_SIZE;
return rowLen;
@ -286,17 +207,17 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
const int32_t TYPE_COLUMN_LENGTH = 16;
const int32_t NOTE_COLUMN_MIN_LENGTH = 8;
int32_t noteFieldLen = tscMaxLengthOfTagsFields(pSql);
if (noteFieldLen == 0) {
noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
}
int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql);
// if (noteFieldLen == 0) {
// noteFieldLen = NOTE_COLUMN_MIN_LENGTH;
// }
int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen);
tscFieldInfoUpdateOffset(pQueryInfo);
return tscSetValueToResObj(pSql, rowLen);
}
static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengths, int idx, char *result) {
const char *val = row[idx];
const char *val = (const char*)row[idx];
if (val == NULL) {
sprintf(result, "%s", TSDB_DATA_NULL_STR);
return -1;
@ -420,7 +341,7 @@ TAOS_ROW tscFetchRow(void *param) {
return NULL;
}
void* data = doSetResultRowData(pSql, true);
void* data = doSetResultRowData(pSql);
tscClearSqlOwner(pSql);
return data;
@ -485,9 +406,8 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
tstrncpy(f.name, "Database", sizeof(f.name));
}
SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
f.bytes, f.bytes - VARSTR_HEADER_SIZE, false);
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes;
@ -501,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false);
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE;
@ -698,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
}
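The rebuilt DDL now reports the length the user declared instead of the raw stored size: BINARY and NCHAR columns carry a VARSTR_HEADER_SIZE prefix, and NCHAR stores TSDB_NCHAR_SIZE bytes per character, so the stored width has to be converted back. A small helper capturing just that conversion (displayedColLength is an illustrative name; the constants are the ones used above):

static int32_t displayedColLength(uint8_t type, int32_t storedBytes) {
  int32_t len = storedBytes - VARSTR_HEADER_SIZE;  // drop the variable-length header
  if (type == TSDB_DATA_TYPE_NCHAR) {
    len /= TSDB_NCHAR_SIZE;                        // bytes -> characters
  }
  return len;                                      // e.g. 10 for an NCHAR(10) column
}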
@ -721,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}
@ -731,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
for (int32_t i = numOfRows; i < totalRows; i++) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
if (type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes/TSDB_NCHAR_SIZE;
}
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}
@ -893,8 +825,11 @@ static int32_t tscProcessClientVer(SSqlObj *pSql) {
static int32_t tscProcessServStatus(SSqlObj *pSql) {
STscObj* pObj = pSql->pTscObj;
if (pObj->pHb != NULL) {
if (pObj->pHb->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid);
if (pHb != NULL) {
int32_t code = pHb->res.code;
taosReleaseRef(tscObjRef, pObj->hbrid);
if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
return pSql->res.code;
}
@ -922,19 +857,17 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
pQueryInfo->order.order = TSDB_ORDER_ASC;
tscFieldInfoClear(&pQueryInfo->fieldsInfo);
pQueryInfo->fieldsInfo.pFields = taosArrayInit(1, sizeof(TAOS_FIELD));
pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(1, sizeof(SFieldSupInfo));
pQueryInfo->fieldsInfo.internalField = taosArrayInit(1, sizeof(SInternalField));
TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, 0);
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0);
memcpy(pRes->data, val, pField->bytes);
memcpy(pRes->data, val, pInfo->field.bytes);
}
int tscProcessLocalCmd(SSqlObj *pSql) {

@ -13,14 +13,15 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
#include "qAst.h"
#include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "tutil.h"
#include "tscLog.h"
#include "tscLocalMerge.h"
typedef struct SCompareParam {
SLocalDataSource **pLocalData;
@ -97,14 +98,14 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
pCtx->param[1].i64Key = pQueryInfo->order.orderColId;
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
pCtx->param[0].nType = pExpr->param[0].nType;
}
SResultInfo *pResInfo = &pReducer->pResInfo[i];
pResInfo->bufLen = pExpr->interBytes;
pResInfo->interResultBuf = calloc(1, (size_t) pResInfo->bufLen);
pCtx->resultInfo = &pReducer->pResInfo[i];
pCtx->resultInfo->superTableQ = true;
pCtx->interBufBytes = pExpr->interBytes;
pCtx->resultInfo = calloc(1, pCtx->interBufBytes + sizeof(SResultRowCellInfo));
pCtx->stableQuery = true;
}
int16_t n = 0;
@ -132,40 +133,53 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
}
static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
int32_t offset = 0;
SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo));
for(int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
pFillCol[i].col.bytes = pExpr->resBytes;
pFillCol[i].col.type = (int8_t)pExpr->resType;
pFillCol[i].col.colId = pExpr->colInfo.colId;
pFillCol[i].flag = pExpr->colInfo.flag;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = pExpr->functionId;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
offset += pExpr->resBytes;
if (pIField->pArithExprInfo == NULL) {
SSqlExpr* pExpr = pIField->pSqlExpr;
pFillCol[i].col.bytes = pExpr->resBytes;
pFillCol[i].col.type = (int8_t)pExpr->resType;
pFillCol[i].col.colId = pExpr->colInfo.colId;
pFillCol[i].flag = pExpr->colInfo.flag;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = pExpr->functionId;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
} else {
pFillCol[i].col.bytes = pIField->field.bytes;
pFillCol[i].col.type = (int8_t)pIField->field.type;
pFillCol[i].col.colId = -100;
pFillCol[i].flag = TSDB_COL_NORMAL;
pFillCol[i].col.offset = offset;
pFillCol[i].functionId = -1;
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
}
offset += pFillCol[i].col.bytes;
}
return pFillCol;
}
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SSqlObj* pSql) {
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (pMemBuffer == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p pMemBuffer is NULL", pMemBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
if (pDesc->pColumnModel == NULL) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
tscError("%p no local buffer or intermediate result format model", pSql);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
@ -183,7 +197,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
}
if (numOfFlush == 0 || numOfBuffer == 0) {
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pCmd->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty
tscDebug("%p retrieved no data", pSql);
return;
}
@ -192,7 +207,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
pMemBuffer[0]->pageSize);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_APP_ERROR;
return;
}
@ -203,7 +218,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return;
}
@ -227,7 +242,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds == NULL) {
tscError("%p failed to create merge structure", pSql);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -254,7 +269,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (ds->filePage.num == 0) { // no data in this flush, the index does not increase
tscDebug("%p flush data is empty, ignore %d flush record", pSql, idx);
taosTFree(ds);
tfree(ds);
continue;
}
@ -264,7 +279,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
// no data actually, no need to merge result.
if (idx == 0) {
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -272,7 +287,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SCompareParam *param = malloc(sizeof(SCompareParam));
if (param == NULL) {
taosTFree(pReducer);
tfree(pReducer);
return;
}
@ -286,8 +301,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
taosTFree(param);
taosTFree(pReducer);
tfree(param);
tfree(pReducer);
return;
}
@ -316,36 +331,32 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16;
pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList);
pReducer->resColModel = finalmodel;
pReducer->resColModel->capacity = pReducer->nResultBufSize;
pReducer->finalModel = pFFModel;
assert(pReducer->finalRowSize > 0);
if (pReducer->finalRowSize > 0) {
pReducer->resColModel->capacity /= pReducer->finalRowSize;
if (finalmodel->rowSize > 0) {
pReducer->resColModel->capacity /= finalmodel->rowSize;
}
assert(pReducer->finalRowSize <= pReducer->rowSize);
assert(finalmodel->rowSize > 0 && finalmodel->rowSize <= pReducer->rowSize);
pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
if (pReducer->pTempBuffer == NULL || pReducer->discardData == NULL || pReducer->pResultBuf == NULL ||
/*pReducer->pBufForInterpo == NULL || */pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
taosTFree(pReducer->pTempBuffer);
taosTFree(pReducer->discardData);
taosTFree(pReducer->pResultBuf);
taosTFree(pReducer->pFinalRes);
taosTFree(pReducer->prevRowOfInput);
taosTFree(pReducer->pLoserTree);
taosTFree(param);
taosTFree(pReducer);
pReducer->pFinalRes == NULL || pReducer->prevRowOfInput == NULL) {
tfree(pReducer->pTempBuffer);
tfree(pReducer->discardData);
tfree(pReducer->pResultBuf);
tfree(pReducer->pFinalRes);
tfree(pReducer->prevRowOfInput);
tfree(pReducer->pLoserTree);
tfree(param);
tfree(pReducer);
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return;
}
size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
pReducer->pTempBuffer->num = 0;
pReducer->pResInfo = calloc(numOfCols, sizeof(SResultInfo));
tscCreateResPointerInfo(pRes, pQueryInfo);
tscInitSqlContext(pCmd, pReducer, pDesc);
@ -373,8 +384,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
}
}
@ -489,46 +500,41 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
}
pLocalReducer->pFillInfo = taosDestoryFillInfo(pLocalReducer->pFillInfo);
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
if (pCtx->tagInfo.pTagCtxList != NULL) {
taosTFree(pCtx->tagInfo.pTagCtxList);
tfree(pCtx->tagInfo.pTagCtxList);
}
}
taosTFree(pLocalReducer->pCtx);
tfree(pLocalReducer->pCtx);
}
taosTFree(pLocalReducer->prevRowOfInput);
tfree(pLocalReducer->prevRowOfInput);
taosTFree(pLocalReducer->pTempBuffer);
taosTFree(pLocalReducer->pResultBuf);
if (pLocalReducer->pResInfo != NULL) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
taosTFree(pLocalReducer->pResInfo[i].interResultBuf);
}
taosTFree(pLocalReducer->pResInfo);
}
tfree(pLocalReducer->pTempBuffer);
tfree(pLocalReducer->pResultBuf);
if (pLocalReducer->pLoserTree) {
taosTFree(pLocalReducer->pLoserTree->param);
taosTFree(pLocalReducer->pLoserTree);
tfree(pLocalReducer->pLoserTree->param);
tfree(pLocalReducer->pLoserTree);
}
taosTFree(pLocalReducer->pFinalRes);
taosTFree(pLocalReducer->discardData);
tfree(pLocalReducer->pFinalRes);
tfree(pLocalReducer->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel,
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
taosTFree(pLocalReducer->pLocalDataSrc[i]);
tfree(pLocalReducer->pLocalDataSrc[i]);
}
pLocalReducer->numOfBuffer = 0;
@ -562,7 +568,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
if (numOfGroupByCols > 0) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
// the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
@ -595,7 +602,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
*pOrderDesc = tOrderDesCreate(orderColIndexList, numOfGroupByCols, pModel, pQueryInfo->order.order);
taosTFree(orderColIndexList);
tfree(orderColIndexList);
if (*pOrderDesc == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -648,7 +655,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
}
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
SColumnModel **pFinalModel, uint32_t nBufferSizes) {
SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -681,6 +688,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pSchema[i].bytes = pExpr->resBytes;
pSchema[i].type = (int8_t)pExpr->resType;
tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name));
rlen += pExpr->resBytes;
}
@ -697,7 +706,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
pg *= 2;
}
size_t numOfSubs = pTableMetaInfo->vgroupList->numOfVgroups;
size_t numOfSubs = pSql->subState.numOfSub;
assert(numOfSubs <= pTableMetaInfo->vgroupList->numOfVgroups);
for (int32_t i = 0; i < numOfSubs; ++i) {
(*pMemBuffer)[i] = createExtMemBuffer(nBufferSizes, rlen, pg, pModel);
(*pMemBuffer)[i]->flushModel = MULTIPLE_APPEND_MODEL;
@ -705,16 +715,22 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
if (createOrderDescriptor(pOrderDesc, pCmd, pModel) != TSDB_CODE_SUCCESS) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
taosTFree(pSchema);
tfree(pSchema);
return pRes->code;
}
// final result depends on the fields number
memset(pSchema, 0, sizeof(SSchema) * size);
for (int32_t i = 0; i < size; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
SSchema *p1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
SSchema p1 = {0};
if (pExpr->colInfo.colIndex != TSDB_TBNAME_COLUMN_INDEX) {
p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex);
} else {
p1 = tGetTableNameColumnSchema();
}
int32_t inter = 0;
int16_t type = -1;
@ -733,7 +749,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
functionId = TSDB_FUNC_LAST;
}
getResultDataInfo(p1->type, p1->bytes, functionId, 0, &type, &bytes, &inter, 0, false);
int32_t ret = getResultDataInfo(p1.type, p1.bytes, functionId, 0, &type, &bytes, &inter, 0, false);
assert(ret == TSDB_CODE_SUCCESS);
}
pSchema[i].type = (uint8_t)type;
@ -742,8 +759,20 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
}
*pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
taosTFree(pSchema);
memset(pSchema, 0, sizeof(SSchema) * size);
size = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < size; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
pSchema[i].bytes = pField->field.bytes;
pSchema[i].type = pField->field.type;
tstrncpy(pSchema[i].name, pField->field.name, tListLen(pSchema[i].name));
}
*pFFModel = createColumnModel(pSchema, (int32_t) size, capacity);
tfree(pSchema);
return TSDB_CODE_SUCCESS;
}
@ -753,16 +782,18 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
* @param pFinalModel
* @param numOfVnodes
*/
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel *pFFModel,
int32_t numOfVnodes) {
destroyColumnModel(pFinalModel);
destroyColumnModel(pFFModel);
tOrderDescDestroy(pDesc);
for (int32_t i = 0; i < numOfVnodes; ++i) {
pMemBuffer[i] = destoryExtMemBuffer(pMemBuffer[i]);
}
taosTFree(pMemBuffer);
tfree(pMemBuffer);
}
/**
@ -858,17 +889,17 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = pBeforeFillData->num;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t)pBeforeFillData->num;
tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
int32_t prevSize = (int32_t) pBeforeFillData->num;
tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= pQueryInfo->limit.offset;
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
} else {
pQueryInfo->limit.offset -= pRes->numOfRows;
@ -888,13 +919,13 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
// set remain data to be discarded, and reset the interpolation information
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
}
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalRowSize));
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalModel->rowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
@ -922,7 +953,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
while (1) {
int64_t newRows = taosGenerateDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@ -936,7 +967,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = newRows;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
break;
@ -951,7 +982,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
// all output in current group are completed
int32_t totalRemainRows = (int32_t)getFilledNumOfRes(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
int32_t totalRemainRows = (int32_t)getNumOfResWithFill(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@ -972,10 +1003,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
offset += pField->bytes;
}
pRes->numOfRowsGroup += pRes->numOfRows;
@ -984,10 +1016,10 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
pBeforeFillData->num = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
taosTFree(pResPages[i]);
tfree(pResPages[i]);
}
taosTFree(pResPages);
tfree(pResPages);
}
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
@ -1070,7 +1102,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
continue;
}
SResultInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
if (maxOutput < pResInfo->numOfRes) {
maxOutput = pResInfo->numOfRes;
}
@ -1228,6 +1260,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalModel->rowSize);
}
#ifdef _DEBUG_VIEW
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
@ -1251,10 +1287,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
return true;
}
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) { // reset output buffer to the beginning
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
pLocalReducer->pCtx[i].aOutputBuf =
pLocalReducer->pResultBuf->data + tscFieldInfoGetOffset(pQueryInfo, i) * pLocalReducer->resColModel->capacity;
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
@ -1299,7 +1336,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t) getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
}
@ -1328,7 +1365,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getFilledNumOfRes(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t)getNumOfResWithFill(pFillInfo, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) {
doFillResult(pSql, pLocalReducer, true);
}
@ -1499,8 +1536,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else {
// current row does not belongs to the previous group, so it is not be handled yet.
} else { // current row does not belong to the previous group, so it has not been handled yet.
pLocalReducer->hasUnprocessedRow = true;
}
@ -1594,3 +1630,46 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
int32_t maxRowSize = MAX(rowSize, finalRowSize);
char* pbuf = calloc(1, pOutput->num * maxRowSize);
size_t size = tscNumOfFields(pQueryInfo);
SArithmeticSupport arithSup = {0};
// todo refactor
arithSup.offset = 0;
arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
arithSup.exprList = pQueryInfo->exprList;
arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset);
}
int32_t offset = 0;
for (int i = 0; i < size; ++i) {
SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
arithSup.pArithExpr = pSup->pArithExprInfo;
tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
} else {
SSqlExpr* pExpr = pSup->pSqlExpr;
memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num);
}
offset += pSup->field.bytes;
}
memcpy(pOutput->data, pbuf, pOutput->num * offset);
tfree(pbuf);
tfree(arithSup.data);
return offset;
}
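doArithmeticCalculate works on the column-major layout of the merged page: column k starts at pOutput->data + pOutput->num * offset(k) and spans resBytes * num bytes, so plain columns can be copied with a single memcpy while arithmetic columns are evaluated through tExprTreeCalcTraverse. A standalone sketch (not TDengine code) of that addressing:

#include <string.h>

// copy column k out of a column-major block of `num` rows; colOffset is the sum
// of the widths of the preceding columns, colBytes the width of column k itself
static void copyColumnMajor(char *dst, const char *blockData, size_t num,
                            size_t colOffset, size_t colBytes) {
  memcpy(dst, blockData + num * colOffset, num * colBytes);
}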

@ -630,11 +630,17 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
return TSDB_CODE_SUCCESS;
}
static void tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
pBlocks->tid = pTableMeta->id.tid;
pBlocks->uid = pTableMeta->id.uid;
pBlocks->sversion = pTableMeta->sversion;
pBlocks->numOfRows += numOfRows;
if (pBlocks->numOfRows + numOfRows >= INT16_MAX) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
pBlocks->numOfRows += numOfRows;
return TSDB_CODE_SUCCESS;
}
}
// data block is disordered, sort it in ascending order
@ -702,7 +708,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
}
int32_t code = TSDB_CODE_TSC_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
char * tmpTokenBuf = calloc(1, 16*1024); // used for removing escape characters: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -722,7 +728,11 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
}
SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
return code;
}
dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
dataBuf->numOfTables = 1;
@ -790,9 +800,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sql += index;
tscAllocPayload(pCmd, sizeof(STagData));
STagData *pTag = &pCmd->tagData;
memset(pTag, 0, sizeof(STagData));
//the source super table is moved to the secondary position of the pTableMetaInfo list
if (pQueryInfo->numOfTables < 2) {
@ -805,7 +812,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return code;
}
STagData *pTag = realloc(pCmd->pTagData, offsetof(STagData, data));
if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memset(pTag, 0, offsetof(STagData, data));
tstrncpy(pTag->name, pSTableMeterMetaInfo->name, sizeof(pTag->name));
pCmd->pTagData = pTag;
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -934,7 +948,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tdSortKVRowByColIdx(row);
pTag->dataLen = kvRowLen(row);
pTag = (STagData*)realloc(pCmd->pTagData, offsetof(STagData, data) + kvRowLen(row));
if (pTag == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pCmd->pTagData = pTag;
pTag->dataLen = htonl(kvRowLen(row));
kvRowCpy(pTag->data, row);
free(row);
@ -945,8 +965,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
pTag->dataLen = htonl(pTag->dataLen);
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
@ -1148,6 +1166,10 @@ int tsParseInsertSql(SSqlObj *pSql) {
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
goto _error;
}
str += index;
if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
@ -1372,7 +1394,10 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;
SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData);
tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
}
if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) {
return code;
@ -1406,7 +1431,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
assert(taos_errno(pSql) == code);
taos_free_result(pSql);
taosTFree(pSupporter);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
@ -1445,7 +1470,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *tokenBuf = calloc(1, 4096);
while ((readLen = taosGetline(&line, &n, fp)) != -1) {
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
line[--readLen] = 0;
}
@ -1470,7 +1495,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
}
taosTFree(tokenBuf);
tfree(tokenBuf);
free(line);
if (count > 0) {
@ -1483,7 +1508,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
} else {
taos_free_result(pSql);
taosTFree(pSupporter);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
@ -1513,7 +1538,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
taosTFree(pSupporter)
tfree(pSupporter)
tscQueueAsyncRes(pSql);
return;

@ -258,14 +258,391 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
static int doBindParam(char* data, SParamInfo* param, TAOS_BIND* bind) {
if (bind->is_null != NULL && *(bind->is_null)) {
if (param->type == TSDB_DATA_TYPE_BINARY || param->type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(data + param->offset, param->type);
} else {
setNull(data + param->offset, param->type, param->bytes);
}
setNull(data + param->offset, param->type, param->bytes);
return TSDB_CODE_SUCCESS;
}
if (1) {
// allow users to bind parameter data of a different type
union {
int8_t v1;
int16_t v2;
int32_t v4;
int64_t v8;
float f4;
double f8;
unsigned char buf[32*1024];
} u;
switch (param->type) {
case TSDB_DATA_TYPE_BOOL: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL: {
u.v1 = *(int8_t*)bind->buffer;
if (u.v1==0 || u.v1==1) break;
} break;
case TSDB_DATA_TYPE_TINYINT: {
u.v1 = *(int8_t*)bind->buffer;
if (u.v1==0 || u.v1==1) break;
} break;
case TSDB_DATA_TYPE_SMALLINT: {
u.v1 = (int8_t)*(int16_t*)bind->buffer;
if (u.v1==0 || u.v1==1) break;
} break;
case TSDB_DATA_TYPE_INT: {
u.v1 = (int8_t)*(int32_t*)bind->buffer;
if (u.v1==0 || u.v1==1) break;
} break;
case TSDB_DATA_TYPE_BIGINT: {
u.v1 = (int8_t)*(int64_t*)bind->buffer;
if (u.v1==0 || u.v1==1) break;
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
// "0", "1" convertible
if (strncmp((const char*)bind->buffer, "0", *bind->length)==0) {
u.v1 = 0;
break;
}
if (strncmp((const char*)bind->buffer, "1", *bind->length)==0) {
u.v1 = 1;
break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
memcpy(data + param->offset, &u.v1, sizeof(u.v1));
return TSDB_CODE_SUCCESS;
} break;
case TSDB_DATA_TYPE_TINYINT: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
int8_t v = *(int8_t*)bind->buffer;
u.v1 = v;
if (v >= SCHAR_MIN && v <= SCHAR_MAX) break;
} break;
case TSDB_DATA_TYPE_SMALLINT: {
int16_t v = *(int16_t*)bind->buffer;
u.v1 = (int8_t)v;
if (v >= SCHAR_MIN && v <= SCHAR_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_INT: {
int32_t v = *(int32_t*)bind->buffer;
u.v1 = (int8_t)v;
if (v >= SCHAR_MIN && v <= SCHAR_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)bind->buffer;
u.v1 = (int8_t)v;
if (v >= SCHAR_MIN && v <= SCHAR_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
int64_t v;
int n, r;
r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n);
if (r == 1 && n == strlen((const char*)bind->buffer)) {
u.v1 = (int8_t)v;
if (v >= SCHAR_MIN && v <= SCHAR_MAX) break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
memcpy(data + param->offset, &u.v1, sizeof(u.v1));
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_SMALLINT: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT: {
int v = *(int16_t*)bind->buffer;
u.v2 = (int16_t)v;
} break;
case TSDB_DATA_TYPE_INT: {
int32_t v = *(int32_t*)bind->buffer;
u.v2 = (int16_t)v;
if (v >= SHRT_MIN && v <= SHRT_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)bind->buffer;
u.v2 = (int16_t)v;
if (v >= SHRT_MIN && v <= SHRT_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
int64_t v;
int n, r;
r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n);
if (r == 1 && n == strlen((const char*)bind->buffer)) {
u.v2 = (int16_t)v;
if (v >= SHRT_MIN && v <= SHRT_MAX) break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
memcpy(data + param->offset, &u.v2, sizeof(u.v2));
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_INT: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
u.v4 = *(int8_t*)bind->buffer; // widen the 1-byte source instead of over-reading it as int32_t
} break;
case TSDB_DATA_TYPE_SMALLINT: {
u.v4 = *(int16_t*)bind->buffer; // widen the 2-byte source
} break;
case TSDB_DATA_TYPE_INT: {
u.v4 = *(int32_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BIGINT: {
int64_t v = *(int64_t*)bind->buffer;
u.v4 = (int32_t)v;
if (v >= INT_MIN && v <= INT_MAX) break;
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
int64_t v;
int n,r;
r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n);
if (r==1 && n==strlen((const char*)bind->buffer)) {
u.v4 = (int32_t)v;
if (v >= INT_MIN && v <= INT_MAX) break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
}
memcpy(data + param->offset, &u.v4, sizeof(u.v4));
return TSDB_CODE_SUCCESS;
} break;
case TSDB_DATA_TYPE_FLOAT: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
u.f4 = *(int8_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_SMALLINT: {
u.f4 = *(int16_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_INT: {
u.f4 = (float)*(int32_t*)bind->buffer;
// narrowing conversion; possible precision loss is not checked here
} break;
case TSDB_DATA_TYPE_BIGINT: {
u.f4 = (float)*(int64_t*)bind->buffer;
// narrowing conversion; possible precision loss is not checked here
} break;
case TSDB_DATA_TYPE_FLOAT: {
u.f4 = *(float*)bind->buffer;
} break;
case TSDB_DATA_TYPE_DOUBLE: {
u.f4 = (float)*(double*)bind->buffer; // read as double, then narrow
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
float v;
int n,r;
r = sscanf((const char*)bind->buffer, "%f%n", &v, &n);
if (r==1 && n==strlen((const char*)bind->buffer)) {
u.f4 = v;
break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
}
memcpy(data + param->offset, &u.f4, sizeof(u.f4));
return TSDB_CODE_SUCCESS;
} break;
case TSDB_DATA_TYPE_BIGINT: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
u.v8 = *(int8_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_SMALLINT: {
u.v8 = *(int16_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_INT: {
u.v8 = *(int32_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BIGINT: {
u.v8 = *(int64_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
int64_t v;
int n,r;
r = sscanf((const char*)bind->buffer, "%" PRId64 "%n", &v, &n);
if (r==1 && n==strlen((const char*)bind->buffer)) {
u.v8 = v;
break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
memcpy(data + param->offset, &u.v8, sizeof(u.v8));
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_DOUBLE: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
u.f8 = *(int8_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_SMALLINT: {
u.f8 = *(int16_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_INT: {
u.f8 = *(int32_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BIGINT: {
u.f8 = (double)*(int64_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_FLOAT: {
u.f8 = *(float*)bind->buffer;
} break;
case TSDB_DATA_TYPE_DOUBLE: {
u.f8 = *(double*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
double v;
int n,r;
r = sscanf((const char*)bind->buffer, "%lf%n", &v, &n);
if (r==1 && n==strlen((const char*)bind->buffer)) {
u.f8 = v;
break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
case TSDB_DATA_TYPE_TIMESTAMP:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
memcpy(data + param->offset, &u.f8, sizeof(u.f8));
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_TIMESTAMP: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_TIMESTAMP: {
u.v8 = *(int64_t*)bind->buffer;
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
// TODO: confirm that the precision and daylight arguments passed to taosParseTime below are correct
int32_t len = (int32_t)*bind->length;
if (taosParseTime(bind->buffer, &u.v8, len, 3, tsDaylight) == TSDB_CODE_SUCCESS) {
break;
}
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_DOUBLE:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
} break;
};
memcpy(data + param->offset, &u.v8, sizeof(u.v8));
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_BINARY: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_BINARY: {
if ((*bind->length) > (uintptr_t)param->bytes) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
short size = (short)*bind->length;
STR_WITH_SIZE_TO_VARSTR(data + param->offset, bind->buffer, size);
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_NCHAR:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
}
case TSDB_DATA_TYPE_NCHAR: {
switch (bind->buffer_type) {
case TSDB_DATA_TYPE_NCHAR: {
size_t output = 0;
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
varDataSetLen(data + param->offset, output);
return TSDB_CODE_SUCCESS;
}
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BINARY:
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
}
default: {
return TSDB_CODE_TSC_INVALID_VALUE;
}
}
}
if (bind->buffer_type != param->type) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
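
// Hedged usage sketch (not part of this diff): exercises the flexible binding path
// above by supplying an INT buffer for a parameter whose column type is TINYINT;
// the converter range-checks the value and narrows it to int8_t.  Only the TAOS_BIND
// fields referenced above (buffer_type, buffer, length) are set; taos.h and string.h
// are assumed to be included, and the table name is hypothetical.
static void bindIntToTinyintSketch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "insert into t_demo values(?, ?)";  // hypothetical table: (ts TIMESTAMP, v TINYINT)
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  int64_t   ts  = 1433955661000;
  int32_t   val = 42;                       // fits in TINYINT, so the conversion succeeds
  uintptr_t tsLen = sizeof(ts), valLen = sizeof(val);

  TAOS_BIND params[2] = {0};
  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer      = &ts;
  params[0].length      = &tsLen;
  params[1].buffer_type = TSDB_DATA_TYPE_INT;   // column itself is TINYINT
  params[1].buffer      = &val;
  params[1].length      = &valLen;

  taos_stmt_bind_param(stmt, params);
  taos_stmt_add_batch(stmt);
  taos_stmt_execute(stmt);
  taos_stmt_close(stmt);
}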
@ -637,3 +1014,80 @@ TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) {
pStmt->pSql = NULL;
return result;
}
int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert) {
STscStmt* pStmt = (STscStmt*)stmt;
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return TSDB_CODE_TSC_DISCONNECTED;
}
if (insert) *insert = pStmt->isInsert;
return TSDB_CODE_SUCCESS;
}
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
STscStmt* pStmt = (STscStmt*)stmt;
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return TSDB_CODE_TSC_DISCONNECTED;
}
if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql;
SSqlCmd *pCmd = &pSql->cmd;
*nums = pCmd->numOfParams;
return TSDB_CODE_SUCCESS;
} else {
SNormalStmt* normal = &pStmt->normal;
*nums = normal->numParams;
return TSDB_CODE_SUCCESS;
}
}
int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
STscStmt* pStmt = (STscStmt*)stmt;
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return TSDB_CODE_TSC_DISCONNECTED;
}
if (pStmt->isInsert) {
SSqlObj* pSql = pStmt->pSql;
SSqlCmd *pCmd = &pSql->cmd;
STableDataBlocks* pBlock = taosArrayGetP(pCmd->pDataBlocks, 0);
assert(pCmd->numOfParams == pBlock->numOfParams);
if (idx < 0 || idx >= pBlock->numOfParams) return -1;
SParamInfo* param = pBlock->params + idx;
if (type) *type = param->type;
if (bytes) *bytes = param->bytes;
return TSDB_CODE_SUCCESS;
} else {
return TSDB_CODE_TSC_APP_ERROR;
}
}
const char *taos_data_type(int type) {
switch (type) {
case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";
case TSDB_DATA_TYPE_BOOL: return "TSDB_DATA_TYPE_BOOL";
case TSDB_DATA_TYPE_TINYINT: return "TSDB_DATA_TYPE_TINYINT";
case TSDB_DATA_TYPE_SMALLINT: return "TSDB_DATA_TYPE_SMALLINT";
case TSDB_DATA_TYPE_INT: return "TSDB_DATA_TYPE_INT";
case TSDB_DATA_TYPE_BIGINT: return "TSDB_DATA_TYPE_BIGINT";
case TSDB_DATA_TYPE_FLOAT: return "TSDB_DATA_TYPE_FLOAT";
case TSDB_DATA_TYPE_DOUBLE: return "TSDB_DATA_TYPE_DOUBLE";
case TSDB_DATA_TYPE_BINARY: return "TSDB_DATA_TYPE_BINARY";
case TSDB_DATA_TYPE_TIMESTAMP: return "TSDB_DATA_TYPE_TIMESTAMP";
case TSDB_DATA_TYPE_NCHAR: return "TSDB_DATA_TYPE_NCHAR";
default: return "UNKNOWN";
}
}
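
// Hedged usage sketch (not part of this diff): after preparing a statement, the helpers
// above report whether it is an INSERT, how many parameter markers it has, and the
// type/width of the column behind each marker; taos_data_type() turns the type code into
// a readable name.  taos.h, stdio.h and string.h are assumed to be included, and the SQL
// text is hypothetical.
static void inspectStmtParamsSketch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "insert into t_demo values(?, ?)";  // hypothetical table
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  int isInsert = 0, numParams = 0;
  taos_stmt_is_insert(stmt, &isInsert);
  taos_stmt_num_params(stmt, &numParams);
  printf("insert:%d, params:%d\n", isInsert, numParams);

  for (int i = 0; i < numParams; ++i) {
    int type = 0, bytes = 0;
    if (taos_stmt_get_param(stmt, i, &type, &bytes) == TSDB_CODE_SUCCESS) {
      printf("param %d: %s, %d bytes\n", i, taos_data_type(type), bytes);
    }
  }
  taos_stmt_close(stmt);
}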

View File

@ -20,8 +20,10 @@
#include "tutil.h"
#include "taosmsg.h"
#include "taos.h"
void tscSaveSlowQueryFp(void *handle, void *tmrId);
void *tscSlowQueryConn = NULL;
TAOS *tscSlowQueryConn = NULL;
bool tscSlowQueryConnInitialized = false;
void tscInitConnCb(void *param, TAOS_RES *result, int code) {
@ -37,6 +39,7 @@ void tscInitConnCb(void *param, TAOS_RES *result, int code) {
tscSlowQueryConnInitialized = true;
tscSaveSlowQueryFp(sql, NULL);
}
taos_free_result(result);
}
void tscAddIntoSqlList(SSqlObj *pSql) {
@ -67,6 +70,7 @@ void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {
} else {
tscDebug("success to save slow query, code:%d", code);
}
taos_free_result(result);
}
void tscSaveSlowQueryFp(void *handle, void *tmrId) {
@ -220,7 +224,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
}
int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
SCMHeartBeatMsg *pHeartbeat = pMsg;
SHeartBeatMsg *pHeartbeat = pMsg;
int allocedQueriesNum = pHeartbeat->numOfQueries;
int allocedStreamsNum = pHeartbeat->numOfStreams;
@ -275,7 +279,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
}
int32_t msgLen = pHeartbeat->numOfQueries * sizeof(SQueryDesc) + pHeartbeat->numOfStreams * sizeof(SStreamDesc) +
sizeof(SCMHeartBeatMsg);
sizeof(SHeartBeatMsg);
pHeartbeat->connId = htonl(pObj->connId);
pHeartbeat->numOfQueries = htonl(pHeartbeat->numOfQueries);
pHeartbeat->numOfStreams = htonl(pHeartbeat->numOfStreams);

File diff suppressed because it is too large

View File

@ -118,7 +118,7 @@ SSchema* tscGetTableColumnSchema(const STableMeta* pTableMeta, int32_t colIndex)
}
// TODO for large number of columns, employ the binary search method
SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
for(int32_t i = 0; i < tinfo.numOfColumns + tinfo.numOfTags; ++i) {
@ -130,25 +130,16 @@ SSchema* tscGetTableColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
return NULL;
}
struct SSchema tscGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
static void tscInitCorVgroupInfo(SCMCorVgroupInfo *corVgroupInfo, SCMVgroupInfo *vgroupInfo) {
static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
corVgroupInfo->version = 0;
corVgroupInfo->inUse = 0;
corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
for (int32_t i = 0; i < corVgroupInfo->numOfEps; i++) {
strncpy(corVgroupInfo->epAddr[i].fqdn, vgroupInfo->epAddr[i].fqdn, TSDB_FQDN_LEN);
corVgroupInfo->epAddr[i].fqdn = strdup(vgroupInfo->epAddr[i].fqdn);
corVgroupInfo->epAddr[i].port = vgroupInfo->epAddr[i].port;
}
}
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size) {
assert(pTableMetaMsg != NULL);
@ -162,11 +153,21 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
.numOfColumns = pTableMetaMsg->numOfColumns,
};
pTableMeta->id.tid = pTableMetaMsg->sid;
pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid;
pTableMeta->vgroupInfo = pTableMetaMsg->vgroup;
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, &pTableMeta->vgroupInfo);
SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
SEpAddrMsg* pEpMsg = &pTableMetaMsg->vgroup.epAddr[i];
pVgroupInfo->epAddr[i].fqdn = strndup(pEpMsg->fqdn, tListLen(pEpMsg->fqdn));
pVgroupInfo->epAddr[i].port = pEpMsg->port;
}
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo);
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
@ -186,28 +187,6 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
return pTableMeta;
}
/**
* the TableMeta data format in memory is as follows:
*
* +--------------------+
* |STableMeta Body data| sizeof(STableMeta)
* +--------------------+
* |Schema data | numOfTotalColumns * sizeof(SSchema)
* +--------------------+
* |Tags data | tag_col_1.bytes + tag_col_2.bytes + ....
* +--------------------+
*
* @param pTableMeta
* @return
*/
char* tsGetTagsValue(STableMeta* pTableMeta) {
int32_t offset = 0;
// int32_t numOfTotalCols = pTableMeta->numOfColumns + pTableMeta->numOfTags;
// uint32_t offset = sizeof(STableMeta) + numOfTotalCols * sizeof(SSchema);
return ((char*)pTableMeta + offset);
}
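
// Hedged sketch (not part of this diff): the memory layout described in the comment
// above can be walked with plain pointer arithmetic -- the SSchema array starts right
// after the fixed STableMeta body and the tag values follow the schema array.  This is
// only an illustration of that comment, not the library's own accessor.
static SSchema* demoSchemaBase(STableMeta* pTableMeta) {
  return (SSchema*)((char*)pTableMeta + sizeof(STableMeta));   // schema array follows the body
}
static char* demoTagBase(STableMeta* pTableMeta, int32_t numOfTotalColumns) {
  return (char*)demoSchemaBase(pTableMeta) + numOfTotalColumns * sizeof(SSchema);  // tags follow the schema
}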
// todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
#include "tutil.h"
#include "ttimer.h"
#include "tscProfile.h"
#include "ttimer.h"
static bool validImpl(const char* str, size_t maxsize) {
if (str == NULL) {
@ -51,14 +50,15 @@ static bool validPassword(const char* passwd) {
return validImpl(passwd, TSDB_PASSWORD_LEN - 1);
}
SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db,
uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) {
static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *auth, const char *db,
uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, TAOS **taos) {
taos_init();
if (!validUserName(user)) {
terrno = TSDB_CODE_TSC_INVALID_USER_LENGTH;
return NULL;
}
SRpcCorEpSet corMgmtEpSet;
char secretEncrypt[32] = {0};
int secretEncryptLen = 0;
@ -85,8 +85,10 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
}
if (ip) {
if (tscSetMgmtEpSetFromCfg(ip, NULL) < 0) return NULL;
if (port) tscMgmtEpSet.epSet.port[0] = port;
if (tscSetMgmtEpSetFromCfg(ip, NULL, &corMgmtEpSet) < 0) return NULL;
if (port) corMgmtEpSet.epSet.port[0] = port;
} else {
if (tscSetMgmtEpSetFromCfg(tsFirst, tsSecond, &corMgmtEpSet) < 0) return NULL;
}
void *pDnodeConn = NULL;
@ -101,11 +103,21 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
rpcClose(pDnodeConn);
return NULL;
}
// set up tscObj's mgmtEpSet
pObj->tscCorMgmtEpSet = (SRpcCorEpSet *)malloc(sizeof(SRpcCorEpSet));
if (NULL == pObj->tscCorMgmtEpSet) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
rpcClose(pDnodeConn);
free(pObj);
return NULL;
}
memcpy(pObj->tscCorMgmtEpSet, &corMgmtEpSet, sizeof(SRpcCorEpSet));
pObj->signature = pObj;
pObj->pDnodeConn = pDnodeConn;
T_REF_INIT_VAL(pObj, 1);
tstrncpy(pObj->user, user, sizeof(pObj->user));
secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
memcpy(pObj->pass, secretEncrypt, secretEncryptLen);
@ -116,6 +128,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
if (len >= TSDB_DB_NAME_LEN) {
terrno = TSDB_CODE_TSC_INVALID_DB_LENGTH;
rpcClose(pDnodeConn);
free(pObj->tscCorMgmtEpSet);
free(pObj);
return NULL;
}
@ -133,6 +146,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
if (NULL == pSql) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
rpcClose(pDnodeConn);
free(pObj->tscCorMgmtEpSet);
free(pObj);
return NULL;
}
@ -150,6 +164,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
rpcClose(pDnodeConn);
free(pSql);
free(pObj->tscCorMgmtEpSet);
free(pObj);
return NULL;
}
@ -161,6 +176,7 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
registerSqlObj(pSql);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
pObj->rid = taosAddRef(tscRefId, pObj);
return pSql;
}
@ -243,16 +259,19 @@ static void asyncConnCallback(void *param, TAOS_RES *tres, int code) {
}
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
SSqlObj* pSql = taosConnectImpl(ip, user, pass, NULL, db, port, asyncConnCallback, param, taos);
void *param, TAOS **taos) {
STscObj *pObj = NULL;
SSqlObj *pSql = taosConnectImpl(ip, user, pass, NULL, db, port, asyncConnCallback, param, (void **)&pObj);
if (pSql == NULL) {
return NULL;
}
if (taos) *taos = pObj;
pSql->fetchFp = fp;
pSql->res.code = tscProcessSql(pSql);
tscDebug("%p DB async connection is opening", taos);
return taos;
return pObj;
}
void taos_close(TAOS *taos) {
@ -273,15 +292,18 @@ void taos_close(TAOS *taos) {
pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
SSqlObj* pHb = pObj->pHb;
if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) {
if (pHb->pRpcCtx != NULL) { // wait for rsp from dnode
rpcCancelRequest(pHb->pRpcCtx);
pHb->pRpcCtx = NULL;
}
if (pObj->hbrid > 0) {
SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid);
if (pHb != NULL) {
if (pHb->rpcRid > 0) { // wait for rsp from dnode
rpcCancelRequest(pHb->rpcRid);
pHb->rpcRid = -1;
}
tscDebug("%p HB is freed", pHb);
taos_free_result(pHb);
tscDebug("%p HB is freed", pHb);
taos_free_result(pHb);
taosReleaseRef(tscObjRef, pHb->self);
}
}
int32_t ref = T_REF_DEC(pObj);
@ -293,7 +315,8 @@ void taos_close(TAOS *taos) {
}
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
tscCloseTscObj(pObj);
taosRemoveRef(tscRefId, pObj->rid);
}
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
@ -317,7 +340,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES
if (sqlLen > (uint32_t)tsMaxSQLStringLen) {
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_TSC_INVALID_SQL;
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
return NULL;
}
@ -370,7 +393,7 @@ int taos_num_fields(TAOS_RES *res) {
size_t numOfCols = tscNumOfFields(pQueryInfo);
for(int32_t i = 0; i < numOfCols; ++i) {
SFieldSupInfo* pInfo = taosArrayGet(pQueryInfo->fieldsInfo.pSupportInfo, i);
SInternalField* pInfo = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
if (pInfo->visible) {
num++;
}
@ -390,7 +413,7 @@ int taos_affected_rows(TAOS_RES *tres) {
SSqlObj* pSql = (SSqlObj*) tres;
if (pSql == NULL || pSql->signature != pSql) return 0;
return (int)(pSql->res.numOfRows);
return pSql->res.numOfRows;
}
TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
@ -407,7 +430,32 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
return NULL;
}
return pQueryInfo->fieldsInfo.pFields->pData;
SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo;
if (pFieldInfo->final == NULL) {
TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD));
int32_t j = 0;
for(int32_t i = 0; i < pFieldInfo->numOfOutput; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(pFieldInfo, i);
if (pField->visible) {
f[j] = pField->field;
// revise the length for binary and nchar fields
if (f[j].type == TSDB_DATA_TYPE_BINARY) {
f[j].bytes -= VARSTR_HEADER_SIZE;
} else if (f[j].type == TSDB_DATA_TYPE_NCHAR) {
f[j].bytes = (f[j].bytes - VARSTR_HEADER_SIZE)/TSDB_NCHAR_SIZE;
}
j += 1;
}
}
pFieldInfo->final = f;
}
return pFieldInfo->final;
}
int taos_retrieve(TAOS_RES *res) {
@ -423,50 +471,30 @@ int taos_retrieve(TAOS_RES *res) {
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
return (int)pRes->numOfRows;
tscProcessSql(pSql);
return pRes->numOfRows;
}
int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
static bool needToFetchNewBlock(SSqlObj* pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
if (pRes->qhandle == 0 || pSql->signature != pSql) {
*rows = NULL;
return 0;
}
// Retrieve new block
tscResetForNextRetrieve(pRes);
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
if (pRes->numOfRows == 0) {
*rows = NULL;
return 0;
}
// secondary merge has handle this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) {
pRes->numOfClauseTotal += pRes->numOfRows;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if (pQueryInfo == NULL)
return 0;
assert(0);
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
}
*rows = pRes->tsrow;
return (int)((pQueryInfo->order.order == TSDB_ORDER_DESC) ? pRes->numOfRows : -pRes->numOfRows);
return (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
pCmd->command == TSDB_SQL_CURRENT_DB ||
pCmd->command == TSDB_SQL_SERV_VERSION ||
pCmd->command == TSDB_SQL_CLI_VERSION ||
pCmd->command == TSDB_SQL_CURRENT_USER);
}
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
@ -489,77 +517,50 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
// set the sql object owner
tscSetSqlOwner(pSql);
// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
pCmd->command == TSDB_SQL_CURRENT_DB ||
pCmd->command == TSDB_SQL_SERV_VERSION ||
pCmd->command == TSDB_SQL_CLI_VERSION ||
pCmd->command == TSDB_SQL_CURRENT_USER )) {
// current data set are exhausted, fetch more result from node
if (pRes->row >= pRes->numOfRows && needToFetchNewBlock(pSql)) {
taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj);
tsem_wait(&pSql->rspSem);
}
void* data = doSetResultRowData(pSql, true);
void* data = doSetResultRowData(pSql);
tscClearSqlOwner(pSql);
return data;
}
int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
#if 0
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
int nRows = 0;
if (pSql == NULL || pSql->signature != pSql) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
*rows = NULL;
return 0;
}
// projection query on metric, pipeline retrieve data from vnode list,
// instead of two-stage merge; dnodeProcessMsgFromShell frees the qhandle
nRows = taos_fetch_block_impl(res, rows);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pSql->cmd.command = pQueryInfo->command;
pCmd->clauseIndex++;
pRes->numOfTotal += pRes->numOfClauseTotal;
pRes->numOfClauseTotal = 0;
pRes->rspType = 0;
pSql->subState.numOfSub = 0;
taosTFree(pSql->pSubs);
assert(pSql->fp == NULL);
tscDebug("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
if (pRes->qhandle == 0 ||
pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
return 0;
}
return nRows;
#endif
tscResetForNextRetrieve(pRes);
(*rows) = taos_fetch_row(res);
return ((*rows) != NULL)? 1:0;
// set the sql object owner
tscSetSqlOwner(pSql);
// current data set are exhausted, fetch more data from node
if (needToFetchNewBlock(pSql)) {
taos_fetch_rows_a(res, waitForRetrieveRsp, pSql->pTscObj);
tsem_wait(&pSql->rspSem);
}
*rows = pRes->urow;
tscClearSqlOwner(pSql);
return pRes->numOfRows;
}
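
// Hedged usage sketch (not part of this diff): the typical consumption pattern behind
// taos_fetch_row/taos_fetch_block above -- run a query, then pull rows until the result
// set is exhausted; new blocks are fetched from the server transparently.  taos.h and
// stdio.h are assumed to be included, and the SQL text is hypothetical.
static void dumpQuerySketch(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "select * from t_demo");   // hypothetical table
  if (res == NULL || taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
    return;
  }

  int         numFields = taos_num_fields(res);
  TAOS_FIELD *fields    = taos_fetch_fields(res);

  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    char buf[4096] = {0};
    taos_print_row(buf, row, fields, numFields);
    printf("%s\n", buf);
  }
  taos_free_result(res);
}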
int taos_select_db(TAOS *taos, const char *db) {
@ -580,7 +581,7 @@ int taos_select_db(TAOS *taos, const char *db) {
}
// send free message to vnode to free qhandle and corresponding resources in vnode
static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
static bool tscKillQueryInDnode(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@ -588,7 +589,8 @@ static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
return true;
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
return true;
}
@ -623,8 +625,7 @@ void taos_free_result(TAOS_RES *res) {
bool freeNow = tscKillQueryInDnode(pSql);
if (freeNow) {
tscDebug("%p free sqlObj in cache", pSql);
SSqlObj** p = pSql->self;
taosCacheRelease(tscObjCache, (void**) &p, true);
taosReleaseRef(tscObjRef, pSql->self);
}
}
@ -702,6 +703,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
return;
}
@ -716,22 +718,16 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
continue;
}
void** p = taosCacheAcquireByKey(tscObjCache, &pSub, sizeof(TSDB_CACHE_PTR_TYPE));
if (p == NULL) {
continue;
}
SSqlObj* pSubObj = (SSqlObj*) (*p);
assert(pSubObj->self == (SSqlObj**) p);
SSqlObj* pSubObj = pSub;
pSubObj->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
if (pSubObj->pRpcCtx != NULL) {
rpcCancelRequest(pSubObj->pRpcCtx);
pSubObj->pRpcCtx = NULL;
if (pSubObj->rpcRid > 0) {
rpcCancelRequest(pSubObj->rpcRid);
pSubObj->rpcRid = -1;
}
tscQueueAsyncRes(pSubObj);
taosCacheRelease(tscObjCache, (void**) &p, false);
taosReleaseRef(tscObjRef, pSubObj->self);
}
tscDebug("%p super table query cancelled", pSql);
@ -750,8 +746,9 @@ void taos_stop_query(TAOS_RES *res) {
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
assert(pSql->pRpcCtx == NULL);
assert(pSql->rpcRid <= 0);
tscKillSTableQuery(pSql);
} else {
if (pSql->cmd.command < TSDB_SQL_LOCAL) {
@ -760,9 +757,9 @@ void taos_stop_query(TAOS_RES *res) {
* reset and freed in the processMsgFromServer function, and causes the invalid
* write problem for rpcCancelRequest.
*/
if (pSql->pRpcCtx != NULL) {
rpcCancelRequest(pSql->pRpcCtx);
pSql->pRpcCtx = NULL;
if (pSql->rpcRid > 0) {
rpcCancelRequest(pSql->rpcRid);
pSql->rpcRid = -1;
}
tscQueueAsyncRes(pSql);
@ -772,6 +769,25 @@ void taos_stop_query(TAOS_RES *res) {
tscDebug("%p query is cancelled", res);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) {
return true;
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
return true;
}
if (col < 0 || col >= tscNumOfFields(pQueryInfo) || row < 0 || row >= pSql->res.numOfRows) {
return true;
}
SInternalField* pInfo = (SInternalField*)TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, col);
return isNull(((char*) pSql->res.urow[col]) + row * pInfo->field.bytes, pInfo->field.type);
}
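
// Hedged usage sketch (not part of this diff): taos_is_null above indexes into the
// current result block, so it pairs naturally with taos_fetch_block.  taos.h and
// stdio.h are assumed to be included, and the SQL text is hypothetical.
static void nullScanSketch(TAOS *taos) {
  TAOS_RES *res  = taos_query(taos, "select k from t_demo");   // hypothetical table
  TAOS_ROW  rows = NULL;
  int numOfRows  = 0;
  while ((numOfRows = taos_fetch_block(res, &rows)) > 0) {
    for (int r = 0; r < numOfRows; ++r) {
      if (taos_is_null(res, r, 0)) {
        printf("row %d of this block: k is NULL\n", r);
      }
    }
  }
  taos_free_result(res);
}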
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0;
for (int i = 0; i < num_fields; ++i) {
@ -869,18 +885,16 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
int32_t sqlLen = (int32_t)strlen(sql);
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
taosTFree(pSql);
return pRes->code;
tfree(pSql);
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
}
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
if (pSql->sqlstr == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscError("%p failed to malloc sql string buffer", pSql);
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
taosTFree(pSql);
return pRes->code;
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
tfree(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strtolower(pSql->sqlstr, sql);
@ -903,7 +917,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
}
if (code != TSDB_CODE_SUCCESS) {
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(taos), pObj);
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(pSql), pObj);
}
taos_free_result(pSql);
@ -1047,7 +1061,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tscDoQuery(pSql);
tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
if ((code = pRes->code) != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
}

View File

@ -168,7 +168,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true);
taosTFree(pTableMetaInfo->vgroupList);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
return;
@ -273,9 +273,9 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;
taosTFree(pTableMetaInfo->vgroupList);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetNextLaunchTimer(pStream, pSql);
}
}
@ -617,6 +617,6 @@ void taos_close_stream(TAOS_STREAM *handle) {
pStream->pSql = NULL;
taos_free_result(pSql);
taosTFree(pStream);
tfree(pStream);
}
}

View File

@ -157,7 +157,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
registerSqlObj(pSql);
code = tsParseSql(pSql, false);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tsem_wait(&pSub->sem);
code = pSql->res.code;
@ -168,7 +168,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
goto fail;
}
if (pSql->cmd.command != TSDB_SQL_SELECT) {
if (pSql->cmd.command != TSDB_SQL_SELECT && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
line = __LINE__;
code = TSDB_CODE_TSC_INVALID_SQL;
goto fail;
@ -179,10 +179,10 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
fail:
tscError("tscCreateSubscription failed at line %d, reason: %s", line, tstrerror(code));
if (pSql != NULL) {
if (pSql->self != NULL) {
taos_free_result(pSql);
if (pSql->self != 0) {
taosReleaseRef(tscObjRef, pSql->self);
} else {
tscFreeSqlObj(pSql);
tscFreeSqlObj(pSql);
}
pSql = NULL;
@ -401,9 +401,11 @@ TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char
tscLoadSubscriptionProgress(pSub);
}
if (!tscUpdateSubscription(pObj, pSub)) {
taos_unsubscribe(pSub, 1);
return NULL;
if (pSub->pSql->cmd.command == TSDB_SQL_SELECT) {
if (!tscUpdateSubscription(pObj, pSub)) {
taos_unsubscribe(pSub, 1);
return NULL;
}
}
pSub->interval = interval;
@ -417,10 +419,80 @@ TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char
return pSub;
}
SSqlObj* recreateSqlObj(SSub* pSub) {
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
return NULL;
}
pSql->signature = pSql;
pSql->pTscObj = pSub->taos;
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if (tsem_init(&pSql->rspSem, 0, 0) == -1) {
tscFreeSqlObj(pSql);
return NULL;
}
pSql->param = pSub;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = asyncCallback;
pSql->fetchFp = asyncCallback;
pSql->sqlstr = strdup(pSub->pSql->sqlstr);
if (pSql->sqlstr == NULL) {
tscFreeSqlObj(pSql);
return NULL;
}
pRes->qhandle = 0;
pRes->numOfRows = 1;
int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (code != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
return NULL;
}
registerSqlObj(pSql);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tsem_wait(&pSub->sem);
code = pSql->res.code;
}
if (code != TSDB_CODE_SUCCESS) {
taosReleaseRef(tscObjRef, pSql->self);
return NULL;
}
if (pSql->cmd.command != TSDB_SQL_SELECT) {
taosReleaseRef(tscObjRef, pSql->self);
return NULL;
}
return pSql;
}
TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
if (pSub == NULL) return NULL;
if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
SSqlObj* pSql = recreateSqlObj(pSub);
if (pSql == NULL) {
return NULL;
}
if (pSub->pSql->self != 0) {
taosReleaseRef(tscObjRef, pSub->pSql->self);
} else {
tscFreeSqlObj(pSub->pSql);
}
pSub->pSql = pSql;
pSql->pSubscription = pSub;
}
tscSaveSubscriptionProgress(pSub);
SSqlObj *pSql = pSub->pSql;
@ -442,7 +514,12 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
size_t size = taosArrayGetSize(pSub->progress) * sizeof(STableIdInfo);
size += sizeof(SQueryTableMsg) + 4096;
tscAllocPayload(&pSql->cmd, (int)size);
int code = tscAllocPayload(&pSql->cmd, (int)size);
if (code != TSDB_CODE_SUCCESS) {
tscError("failed to alloc payload");
return NULL;
}
for (int retry = 0; retry < 3; retry++) {
tscRemoveFromSqlList(pSql);
@ -507,10 +584,13 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
}
if (pSub->pSql != NULL) {
taos_free_result(pSub->pSql);
if (pSub->pSql->self != 0) {
taosReleaseRef(tscObjRef, pSub->pSql->self);
} else {
tscFreeSqlObj(pSub->pSql);
}
}
tscFreeSqlObj(pSub->pSql);
taosArrayDestroy(pSub->progress);
tsem_destroy(&pSub->sem);
memset(pSub, 0, sizeof(*pSub));

File diff suppressed because it is too large

View File

@ -15,7 +15,7 @@
#include "os.h"
#include "taosmsg.h"
#include "tcache.h"
#include "tref.h"
#include "trpc.h"
#include "tsystem.h"
#include "ttimer.h"
@ -31,11 +31,12 @@
// global, not configurable
SCacheObj* tscMetaCache;
SCacheObj* tscObjCache;
int tscObjRef = -1;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
int tsInsertHeadSize;
int tscRefId = -1;
int tscNumOfThreads;
@ -77,8 +78,9 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon
return 0;
}
void taos_init_imp(void) {
char temp[128];
char temp[128] = {0};
errno = TSDB_CODE_SUCCESS;
srand(taosGetTimestampSec());
@ -102,8 +104,8 @@ void taos_init_imp(void) {
taosReadGlobalCfg();
taosCheckGlobalCfg();
taosPrintGlobalCfg();
rpcInit();
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}
@ -114,18 +116,14 @@ void taos_init_imp(void) {
taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note");
}
if (tscSetMgmtEpSetFromCfg(tsFirst, tsSecond) < 0) {
tscError("failed to init mnode EP list");
return;
}
tscInitMsgsFp();
int queueSize = tsMaxConnections*2;
double factor = (tscEmbedded == 0)? 2.0:4.0;
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
if (tscNumOfThreads < 2) tscNumOfThreads = 2;
if (tscNumOfThreads < 2) {
tscNumOfThreads = 2;
}
tscQhandle = taosInitScheduler(queueSize, tscNumOfThreads, "tsc");
if (NULL == tscQhandle) {
@ -140,33 +138,49 @@ void taos_init_imp(void) {
int64_t refreshTime = 10; // 10 seconds by default
if (tscMetaCache == NULL) {
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeRegisteredSqlObj, "sqlObj");
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, tscFreeTableMetaHelper, "tableMeta");
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
}
tscRefId = taosOpenRef(200, tscCloseTscObj);
// taos_cleanup is not yet available in the other language APIs, so register it
// with atexit() to make sure the allocated resources are released and the
// corresponding valgrind warnings are suppressed.
atexit(taos_cleanup);
tscDebug("client is initialized successfully");
}
void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup() {
if (tscMetaCache != NULL) {
taosCacheCleanup(tscMetaCache);
tscMetaCache = NULL;
// this function may be called by user or system, or by both simultaneously.
void taos_cleanup(void) {
tscDebug("start to cleanup client environment");
taosCacheCleanup(tscObjCache);
tscObjCache = NULL;
void* m = tscMetaCache;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) {
taosCacheCleanup(m);
}
if (tscQhandle != NULL) {
taosCleanUpScheduler(tscQhandle);
tscQhandle = NULL;
int refId = atomic_exchange_32(&tscObjRef, -1);
if (refId != -1) {
taosCloseRef(refId);
}
m = tscQhandle;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) {
taosCleanUpScheduler(m);
}
taosCloseRef(tscRefId);
taosCleanupKeywordsTable();
taosCloseLog();
if (tscEmbedded == 0) rpcCleanup();
taosTmrCleanUp(tscTmr);
m = tscTmr;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
taosTmrCleanUp(m);
}
}
static int taos_options_imp(TSDB_OPTION option, const char *pStr) {

File diff suppressed because it is too large

View File

@ -0,0 +1,138 @@
#include <gtest/gtest.h>
#include <iostream>
#include "taos.h"
namespace {
static int64_t start_ts = 1433955661000;
}
/* test parse time function */
TEST(testCase, result_field_test) {
taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
taos_init();
TAOS* conn = taos_connect("ubuntu", "root", "taosdata", 0, 0);
if (conn == NULL) {
printf("Failed to connect to DB, reason:%s", taos_errstr(conn));
exit(-1);
}
TAOS_RES* res = taos_query(conn, "create database if not exists test");
ASSERT_EQ(taos_errno(res), 0);
taos_free_result(res);
res = taos_query(conn, "use test");
ASSERT_EQ(taos_errno(res), 0);
taos_free_result(res);
res = taos_query(conn, "create table if not exists t1(ts timestamp, k int, a binary(11), b nchar(4))");
ASSERT_EQ(taos_errno(res), 0);
taos_free_result(res);
char sql[512] = {0};
sprintf(sql, "insert into t1 values(%ld, 99, 'abc', 'test')", start_ts);
res = taos_query(conn, sql);
ASSERT_EQ(taos_errno(res), 0);
taos_free_result(res);
res = taos_query(conn, "select count(*), spread(ts)/(1000 * 3600 * 24), first(a), last(b) from t1");
ASSERT_EQ(taos_num_fields(res), 4);
TAOS_FIELD* fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_BIGINT);
ASSERT_STREQ(fields[0].name, "count(*)");
ASSERT_EQ(fields[1].bytes, 8);
ASSERT_EQ(fields[1].type, TSDB_DATA_TYPE_DOUBLE);
ASSERT_STREQ(fields[1].name, "spread(ts)/(1000 * 3600 * 24)");
ASSERT_EQ(fields[2].bytes, 11);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_BINARY);
ASSERT_STREQ(fields[2].name, "first(a)");
ASSERT_EQ(fields[3].bytes, 4);
ASSERT_EQ(fields[3].type, TSDB_DATA_TYPE_NCHAR);
ASSERT_STREQ(fields[3].name, "last(b)");
taos_free_result(res);
res = taos_query(conn, "select last_row(*) from t1");
ASSERT_EQ(taos_num_fields(res), 4);
fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[0].name, "last_row(ts)");
ASSERT_EQ(fields[1].bytes, 4);
ASSERT_EQ(fields[1].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[1].name, "last_row(k)");
ASSERT_EQ(fields[2].bytes, 11);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_BINARY);
ASSERT_STREQ(fields[2].name, "last_row(a)");
ASSERT_EQ(fields[3].bytes, 4);
ASSERT_EQ(fields[3].type, TSDB_DATA_TYPE_NCHAR);
ASSERT_STREQ(fields[3].name, "last_row(b)");
taos_free_result(res);
res = taos_query(conn, "select first(*), last(*) from t1");
ASSERT_EQ(taos_num_fields(res), 8);
fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[0].name, "first(ts)");
ASSERT_EQ(fields[1].bytes, 4);
ASSERT_EQ(fields[1].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[1].name, "first(k)");
ASSERT_EQ(fields[2].bytes, 11);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_BINARY);
ASSERT_STREQ(fields[2].name, "first(a)");
ASSERT_EQ(fields[3].bytes, 4);
ASSERT_EQ(fields[3].type, TSDB_DATA_TYPE_NCHAR);
ASSERT_STREQ(fields[3].name, "first(b)");
taos_free_result(res);
res = taos_query(conn, "select first(ts, a, k, k, b, b, ts) from t1");
ASSERT_EQ(taos_num_fields(res), 7);
fields = taos_fetch_fields(res);
ASSERT_EQ(fields[0].bytes, 8);
ASSERT_EQ(fields[0].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[0].name, "first(ts)");
ASSERT_EQ(fields[1].bytes, 11);
ASSERT_EQ(fields[1].type, TSDB_DATA_TYPE_BINARY);
ASSERT_STREQ(fields[1].name, "first(a)");
ASSERT_EQ(fields[2].bytes, 4);
ASSERT_EQ(fields[2].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[2].name, "first(k)");
ASSERT_EQ(fields[3].bytes, 4);
ASSERT_EQ(fields[3].type, TSDB_DATA_TYPE_INT);
ASSERT_STREQ(fields[3].name, "first(k)");
ASSERT_EQ(fields[4].bytes, 4);
ASSERT_EQ(fields[4].type, TSDB_DATA_TYPE_NCHAR);
ASSERT_STREQ(fields[4].name, "first(b)");
ASSERT_EQ(fields[5].bytes, 4);
ASSERT_EQ(fields[5].type, TSDB_DATA_TYPE_NCHAR);
ASSERT_STREQ(fields[5].name, "first(b)");
ASSERT_EQ(fields[6].bytes, 8);
ASSERT_EQ(fields[6].type, TSDB_DATA_TYPE_TIMESTAMP);
ASSERT_STREQ(fields[6].name, "first(ts)");
taos_free_result(res);
taos_close(conn);
}

View File

@ -80,7 +80,7 @@ typedef struct {
#define schemaFLen(s) ((s)->flen)
#define schemaVLen(s) ((s)->vlen)
#define schemaColAt(s, i) ((s)->columns + i)
#define tdFreeSchema(s) taosTFree((s))
#define tdFreeSchema(s) tfree((s))
STSchema *tdDupSchema(STSchema *pSchema);
int tdEncodeSchema(void **buf, STSchema *pSchema);
@ -119,6 +119,33 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Semantic timestamp key definition
typedef uint64_t TKEY;
#define TKEY_INVALID UINT64_MAX
#define TKEY_NULL TKEY_INVALID
#define TKEY_NEGATIVE_FLAG (((TKEY)1) << 63)
#define TKEY_DELETE_FLAG (((TKEY)1) << 62)
#define TKEY_VALUE_FILTER (~(TKEY_NEGATIVE_FLAG | TKEY_DELETE_FLAG))
#define TKEY_IS_NEGATIVE(tkey) (((tkey)&TKEY_NEGATIVE_FLAG) != 0)
#define TKEY_IS_DELETED(tkey) (((tkey)&TKEY_DELETE_FLAG) != 0)
#define tdSetTKEYDeleted(tkey) ((tkey) | TKEY_DELETE_FLAG)
#define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key)))
#define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1))
static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
TSKEY key1 = tdGetKey(*(TKEY *)tkey1);
TSKEY key2 = tdGetKey(*(TKEY *)tkey2);
if (key1 < key2) {
return -1;
} else if (key1 > key2) {
return 1;
} else {
return 0;
}
}
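
// Hedged illustration (not part of this header): how the TKEY encoding above behaves.
// A negative TSKEY keeps its magnitude in the low bits and records the sign in
// TKEY_NEGATIVE_FLAG, so tdGetKey(tdGetTKEY(key)) round-trips the value, and the delete
// flag can be layered on top without losing the timestamp (both flag bits are masked
// out by TKEY_VALUE_FILTER when decoding).
static FORCE_INLINE void tkeyRoundTripDemo(void) {
  TSKEY key  = -1433955661000LL;           // a timestamp before the epoch
  TKEY  tkey = tdGetTKEY(key);             // magnitude | TKEY_NEGATIVE_FLAG

  ASSERT(TKEY_IS_NEGATIVE(tkey));
  ASSERT(!TKEY_IS_DELETED(tkey));
  ASSERT(tdGetKey(tkey) == key);           // round trip preserves the TSKEY

  TKEY deleted = tdSetTKEYDeleted(tkey);   // mark the row as deleted
  ASSERT(TKEY_IS_DELETED(deleted));
  ASSERT(tdGetKey(deleted) == key);        // decoding still recovers the original key
}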
// ----------------- Data row structure
/* A data row, the format is like below:
@ -129,6 +156,8 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*
* NOTE: timestamp in this row structure is TKEY instead of TSKEY
*/
typedef void *SDataRow;
@ -137,11 +166,13 @@ typedef void *SDataRow;
#define dataRowLen(r) (*(uint16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
#define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r)))
#define dataRowKey(r) tdGetKey(dataRowTKey(r))
#define dataRowSetLen(r, l) (dataRowLen(r) = (l))
#define dataRowSetVersion(r, v) (dataRowVersion(r) = (v))
#define dataRowCpy(dst, r) memcpy((dst), (r), dataRowLen(r))
#define dataRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_DATA_ROW_HEAD_SIZE)
#define dataRowDeleted(r) TKEY_IS_DELETED(dataRowTKey(r))
SDataRow tdNewDataRowFromSchema(STSchema *pSchema);
void tdFreeDataRow(SDataRow row);
@ -154,16 +185,18 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i
int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row));
switch (type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(ptr, value, varDataTLen(value));
dataRowLen(row) += varDataTLen(value);
break;
default:
if (IS_VAR_DATA_TYPE(type)) {
*(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
memcpy(ptr, value, varDataTLen(value));
dataRowLen(row) += varDataTLen(value);
} else {
if (offset == 0) {
ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP);
TKEY tvalue = tdGetTKEY(*(TSKEY *)value);
memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]);
} else {
memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
break;
}
}
return 0;
@ -171,12 +204,10 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, i
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
switch (type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset));
default:
return POINTER_SHIFT(row, offset);
if (IS_VAR_DATA_TYPE(type)) {
return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset));
} else {
return POINTER_SHIFT(row, offset);
}
}
@ -196,7 +227,6 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints);
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints);
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
@ -204,28 +234,20 @@ void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
// Get the data pointer from a column-wised data
static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) {
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
break;
default:
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
break;
if (IS_VAR_DATA_TYPE(pCol->type)) {
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
} else {
return POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * row);
}
}
static FORCE_INLINE int32_t dataColGetNEleLen(SDataCol *pDataCol, int rows) {
ASSERT(rows > 0);
switch (pDataCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1));
break;
default:
return TYPE_BYTES[pDataCol->type] * rows;
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
return pDataCol->dataOff[rows - 1] + varDataTLen(tdGetColDataOfRow(pDataCol, rows - 1));
} else {
return TYPE_BYTES[pDataCol->type] * rows;
}
}
@ -243,9 +265,14 @@ typedef struct {
} SDataCols;
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
#define dataColsKeyAt(pCols, idx) ((TSKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsKeyFirst(pCols) dataColsKeyAt(pCols, 0)
#define dataColsKeyLast(pCols) ((pCols->numOfRows == 0) ? 0 : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
#define dataColsTKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, 0))
#define dataColsKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, 0))
#define dataColsTKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, (pCols)->numOfRows - 1))
#define dataColsKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
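
// Hedged illustration (not part of this header): because the macros above return
// TKEY_INVALID / TSDB_DATA_TIMESTAMP_NULL for an empty SDataCols, an append helper can
// check key ordering without special-casing the first row, mirroring the assertion used
// in tdAppendDataRowToDataCol.
static FORCE_INLINE int demoCanAppendRow(SDataCols *pCols, SDataRow row) {
  // an empty block accepts any row; otherwise the new key must be strictly newer
  return pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row);
}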
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
@ -253,10 +280,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
void tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop); //!!!!
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows);
// ----------------- K-V data row structure
/*
@ -284,7 +308,7 @@ typedef struct {
#define kvRowCpy(dst, r) memcpy((dst), (r), kvRowLen(r))
#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) taosTFree(r)
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))
SKVRow tdKVRowDup(SKVRow row);

View File

@ -44,13 +44,19 @@ extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
extern uint32_t tsMaxTmrCtrl;
extern float tsNumOfThreadsPerCore;
extern float tsRatioOfQueryThreads;
extern int32_t tsNumOfCommitThreads;
extern float tsRatioOfQueryThreads; // todo remove it
extern int8_t tsDaylight;
extern char tsTimezone[];
extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern char tsCharset[]; // default encode string
extern int32_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern char tsTempDir[];
//query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing
extern int32_t tsHalfCoresForQuery; // only 50% will be used in query processing
// client
extern int32_t tsTableMetaKeepTimer;
@ -84,6 +90,7 @@ extern int16_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;
extern int32_t tsUpdate;
// balance
extern int32_t tsEnableBalance;
@ -91,6 +98,7 @@ extern int32_t tsAlternativeRole;
extern int32_t tsBalanceInterval;
extern int32_t tsOfflineThreshold;
extern int32_t tsMnodeEqualVnodeNum;
extern int32_t tsFlowCtrl;
// restful
extern int32_t tsEnableHttpModule;
@ -118,6 +126,9 @@ extern char tsMonitorDbName[];
extern char tsInternalPass[];
extern int32_t tsMonitorInterval;
// stream
extern int32_t tsEnableStream;
// internal
extern int32_t tsPrintAuth;
extern int32_t tscEmbedded;
@ -169,7 +180,7 @@ extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag;
extern int32_t httpDebugFlag;
extern int32_t mqttDebugFlag;
extern int32_t monitorDebugFlag;
extern int32_t monDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag;
@ -180,12 +191,12 @@ extern int32_t debugFlag;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
void taosInitGlobalCfg();
bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
void taosInitGlobalCfg();
int32_t taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
#ifdef __cplusplus
}

View File

@ -35,6 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
SSchema tscGetTbnameColumnSchema();
#endif // TDENGINE_NAME_H

View File

@ -18,6 +18,9 @@
#include "tcoding.h"
#include "wchar.h"
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows);
/**
* Duplicate the schema and return a new object
*/
@ -94,7 +97,7 @@ int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) {
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder) {
if (pBuilder) {
taosTFree(pBuilder->columns);
tfree(pBuilder->columns);
}
}
@ -202,7 +205,7 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
pDataCol->offset = colOffset(pCol) + TD_DATA_ROW_HEAD_SIZE;
pDataCol->len = 0;
if (pDataCol->type == TSDB_DATA_TYPE_BINARY || pDataCol->type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pDataCol->type)) {
pDataCol->dataOff = (VarDataOffsetT *)(*pBuf);
pDataCol->pData = POINTER_SHIFT(*pBuf, sizeof(VarDataOffsetT) * maxPoints);
pDataCol->spaceSize = pDataCol->bytes * maxPoints;
@ -215,60 +218,29 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints)
}
}
// value from timestamp should be TKEY here instead of TSKEY
void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
break;
default:
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
break;
}
}
void dataColPopPoints(SDataCol *pCol, int pointsToPop, int numOfRows) {
int pointsLeft = numOfRows - pointsToPop;
ASSERT(pointsLeft > 0);
if (pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR) {
ASSERT(pCol->len > 0);
VarDataOffsetT toffset = pCol->dataOff[pointsToPop];
pCol->len = pCol->len - toffset;
ASSERT(pCol->len > 0);
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, toffset), pCol->len);
dataColSetOffset(pCol, pointsLeft);
if (IS_VAR_DATA_TYPE(pCol->type)) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
// Update the length
pCol->len += varDataTLen(value);
} else {
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
pCol->len = TYPE_BYTES[pCol->type] * pointsLeft;
memmove(pCol->pData, POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * pointsToPop), pCol->len);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
pCol->len += pCol->bytes;
}
}
bool isNEleNull(SDataCol *pCol, int nEle) {
switch (pCol->type) {
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
default:
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
for (int i = 0; i < nEle; i++) {
if (!isNull(tdGetColDataOfRow(pCol, i), pCol->type)) return false;
}
return true;
}
void dataColSetNullAt(SDataCol *pCol, int index) {
@ -367,8 +339,8 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema) {
void tdFreeDataCols(SDataCols *pCols) {
if (pCols) {
taosTFree(pCols->buf);
taosTFree(pCols->cols);
tfree(pCols->buf);
tfree(pCols->cols);
free(pCols);
}
}
@ -390,7 +362,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].spaceSize = pDataCols->cols[i].spaceSize;
pRet->cols[i].pData = (void *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].pData) - (char *)(pDataCols->buf)));
if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
ASSERT(pDataCols->cols[i].dataOff != NULL);
pRet->cols[i].dataOff =
(int32_t *)((char *)pRet->buf + ((char *)(pDataCols->cols[i].dataOff) - (char *)(pDataCols->buf)));
@ -400,7 +372,7 @@ SDataCols *tdDupDataCols(SDataCols *pDataCols, bool keepData) {
pRet->cols[i].len = pDataCols->cols[i].len;
if (pDataCols->cols[i].len > 0) {
memcpy(pRet->cols[i].pData, pDataCols->cols[i].pData, pDataCols->cols[i].len);
if (pRet->cols[i].type == TSDB_DATA_TYPE_BINARY || pRet->cols[i].type == TSDB_DATA_TYPE_NCHAR) {
if (IS_VAR_DATA_TYPE(pRet->cols[i].type)) {
memcpy(pRet->cols[i].dataOff, pDataCols->cols[i].dataOff, sizeof(VarDataOffsetT) * pDataCols->maxPoints);
}
}
@ -420,58 +392,54 @@ void tdResetDataCols(SDataCols *pCols) {
}
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
ASSERT(dataColsKeyLast(pCols) < dataRowKey(row));
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
int dcol = 0;
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
continue;
if (dataRowDeleted(row)) {
for (; dcol < pCols->numOfCols; dcol++) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (dcol == 0) {
dataColAppendVal(pDataCol, dataRowTuple(row), pCols->numOfRows, pCols->maxPoints);
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
}
}
} else {
while (dcol < pCols->numOfCols) {
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
continue;
}
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset+TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
dataColSetNullAt(pDataCol, pCols->numOfRows);
dcol++;
}
}
}
pCols->numOfRows++;
}
// Pop pointsToPop points from the SDataCols
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop) {
int pointsLeft = pCols->numOfRows - pointsToPop;
if (pointsLeft <= 0) {
tdResetDataCols(pCols);
return;
}
for (int iCol = 0; iCol < pCols->numOfCols; iCol++) {
SDataCol *pCol = pCols->cols + iCol;
dataColPopPoints(pCol, pointsToPop, pCols->numOfRows);
}
pCols->numOfRows = pointsLeft;
}
int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) {
ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows);
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
ASSERT(target->numOfCols == source->numOfCols);
SDataCols *pTarget = NULL;
if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
for (int i = 0; i < rowsToMerge; i++) {
for (int j = 0; j < source->numOfCols; j++) {
if (source->cols[j].len > 0) {
@ -499,17 +467,23 @@ _err:
return -1;
}
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2, int limit2, int tRows) {
// src2 data takes priority over src1
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
int limit2, int tRows) {
tdResetDataCols(target);
ASSERT(limit1 <= src1->numOfRows && limit2 <= src2->numOfRows);
while (target->numOfRows < tRows) {
if (*iter1 >= limit1 && *iter2 >= limit2) break;
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
TSKEY key1 = (*iter1 >= limit1) ? INT64_MAX : dataColsKeyAt(src1, *iter1);
TKEY tkey1 = (*iter1 >= limit1) ? TKEY_NULL : dataColsTKeyAt(src1, *iter1);
TSKEY key2 = (*iter2 >= limit2) ? INT64_MAX : dataColsKeyAt(src2, *iter2);
TKEY tkey2 = (*iter2 >= limit2) ? TKEY_NULL : dataColsTKeyAt(src2, *iter2);
if (key1 <= key2) {
ASSERT(tkey1 == TKEY_NULL || (!TKEY_IS_DELETED(tkey1)));
if (key1 < key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
if (src1->cols[i].len > 0) {
@ -520,19 +494,23 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limi
target->numOfRows++;
(*iter1)++;
if (key1 == key2) (*iter2)++;
} else {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
} else if (key1 >= key2) {
if ((key1 > key2) || (key1 == key2 && !TKEY_IS_DELETED(tkey2))) {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
}
}
target->numOfRows++;
}
target->numOfRows++;
(*iter2)++;
if (key1 == key2) (*iter1)++;
}
ASSERT(target->numOfRows <= target->maxPoints);
}
}
@ -691,8 +669,8 @@ int tdInitKVRowBuilder(SKVRowBuilder *pBuilder) {
}
void tdDestroyKVRowBuilder(SKVRowBuilder *pBuilder) {
taosTFree(pBuilder->pColIdx);
taosTFree(pBuilder->buf);
tfree(pBuilder->pColIdx);
tfree(pBuilder->buf);
}
void tdResetKVRowBuilder(SKVRowBuilder *pBuilder) {

View File

@ -45,19 +45,21 @@ int32_t tsEnableTelemetryReporting = 1;
char tsEmail[TSDB_FQDN_LEN] = {0};
// common
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 5000;
int32_t tsRpcTimer = 1000;
int32_t tsRpcMaxTime = 600; // seconds;
int32_t tsMaxShellConns = 5000;
int32_t tsMaxConnections = 5000;
int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0;
float tsRatioOfQueryThreads = 0.5;
int8_t tsDaylight = 0;
int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 1;
float tsRatioOfQueryThreads = 0.5f;
int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
int32_t tsEnableCoreFile = 0;
int32_t tsMaxBinaryDisplayWidth = 30;
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -99,6 +101,15 @@ float tsStreamComputDelayRatio = 0.1f;
int32_t tsProjectExecInterval = 10000; // every 10sec, the projection will be executed once
int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
// the maximum allowed query buffer size during query processing for each data node.
// -1 no limit (default)
// 0 no query allowed, queries are disabled
// positive value (in MB)
int32_t tsQueryBufferSize = -1;
// if enabled, only half of the CPU cores will be used for query processing in the dnode
int32_t tsHalfCoresForQuery = 0;
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
@ -113,6 +124,7 @@ int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
int32_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
@ -124,6 +136,7 @@ int32_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400*100; // seconds, i.e. 100 days
int32_t tsMnodeEqualVnodeNum = 4;
int32_t tsFlowCtrl = 1;
// restful
int32_t tsEnableHttpModule = 1;
@ -151,6 +164,9 @@ char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log";
char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds
// stream
int32_t tsEnableStream = 1;
// internal
int32_t tsPrintAuth = 0;
int32_t tscEmbedded = 0;
@ -190,13 +206,13 @@ int32_t tsNumOfLogLines = 10000000;
int32_t mDebugFlag = 135;
int32_t sdbDebugFlag = 135;
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 131;
int32_t vDebugFlag = 135;
int32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131;
int32_t mqttDebugFlag = 131;
int32_t monitorDebugFlag = 131;
int32_t monDebugFlag = 131;
int32_t qDebugFlag = 131;
int32_t rpcDebugFlag = 131;
int32_t uDebugFlag = 131;
@ -206,9 +222,11 @@ int32_t wDebugFlag = 135;
int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 135;
int32_t (*monitorStartSystemFp)() = NULL;
void (*monitorStopSystemFp)() = NULL;
void (*monitorExecuteSQLFp)(char *sql) = NULL;
int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL;
void (*monExecuteSQLFp)(char *sql) = NULL;
char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"};
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
@ -223,7 +241,7 @@ void taosSetAllDebugFlag() {
odbcDebugFlag = debugFlag;
httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag;
monitorDebugFlag = debugFlag;
monDebugFlag = debugFlag;
qDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
@ -264,15 +282,15 @@ bool taosCfgDynamicOptions(char *msg) {
if (strncasecmp(cfg->option, "monitor", olen) == 0) {
if (1 == vint) {
if (monitorStartSystemFp) {
(*monitorStartSystemFp)();
if (monStartSystemFp) {
(*monStartSystemFp)();
uInfo("monitor is enabled");
} else {
uError("monitor can't be updated, for monitor not initialized");
}
} else {
if (monitorStopSystemFp) {
(*monitorStopSystemFp)();
if (monStopSystemFp) {
(*monStopSystemFp)();
uInfo("monitor is disabled");
} else {
uError("monitor can't be updated, for monitor not initialized");
@ -295,8 +313,8 @@ bool taosCfgDynamicOptions(char *msg) {
}
if (strncasecmp(option, "resetQueryCache", 15) == 0) {
if (monitorExecuteSQLFp) {
(*monitorExecuteSQLFp)("resetQueryCache");
if (monExecuteSQLFp) {
(*monExecuteSQLFp)("resetQueryCache");
uInfo("resetquerycache is executed");
} else {
uError("resetquerycache can't be executed, for monitor not started");
@ -416,6 +434,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "numOfCommitThreads";
cfg.ptr = &tsNumOfCommitThreads;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 1;
cfg.maxValue = 100;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "ratioOfQueryThreads";
cfg.ptr = &tsRatioOfQueryThreads;
cfg.valType = TAOS_CFG_VTYPE_FLOAT;
@ -676,7 +704,7 @@ static void doInitGlobalConfig(void) {
cfg.minValue = TSDB_MIN_CACHE_BLOCK_SIZE;
cfg.maxValue = TSDB_MAX_CACHE_BLOCK_SIZE;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_Mb;
cfg.unitType = TAOS_CFG_UTYPE_MB;
taosInitConfigOption(cfg);
cfg.option = "blocks";
@ -779,6 +807,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "update";
cfg.ptr = &tsUpdate;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_UPDATE;
cfg.maxValue = TSDB_MAX_DB_UPDATE;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
@ -839,6 +877,26 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "queryBufferSize";
cfg.ptr = &tsQueryBufferSize;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = -1;
cfg.maxValue = 500000000000.0f;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_BYTE;
taosInitConfigOption(cfg);
cfg.option = "halfCoresForQuery";
cfg.ptr = &tsHalfCoresForQuery;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// locale & charset
cfg.option = "timezone";
cfg.ptr = tsTimezone;
@ -930,6 +988,17 @@ static void doInitGlobalConfig(void) {
cfg.maxValue = 1000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// module configs
cfg.option = "flowctrl";
cfg.ptr = &tsFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "http";
@ -962,6 +1031,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "stream";
cfg.ptr = &tsEnableStream;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -1174,8 +1253,8 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "monitorDebugFlag";
cfg.ptr = &monitorDebugFlag;
cfg.option = "monDebugFlag";
cfg.ptr = &monDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG;
cfg.minValue = 0;
@ -1224,7 +1303,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "tscEnableRecordSql";
cfg.option = "enableRecordSql";
cfg.ptr = &tsTscEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
@ -1294,13 +1373,23 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "tempDir";
cfg.ptr = tsTempDir;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = tListLen(tsTempDir);
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
}
void taosInitGlobalCfg() {
pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
}
bool taosCheckGlobalCfg() {
int32_t taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
@ -1359,7 +1448,9 @@ bool taosCheckGlobalCfg() {
tsSyncPort = tsServerPort + TSDB_PORT_SYNC;
tsHttpPort = tsServerPort + TSDB_PORT_HTTP;
return true;
taosPrintGlobalCfg();
return 0;
}
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {

View File

@ -188,3 +188,14 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
pToken->z = r;
}
}
SSchema tscGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
.bytes = TSDB_TABLE_NAME_LEN
};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}

View File

@ -355,32 +355,6 @@ bool isValidDataType(int32_t type) {
return type >= TSDB_DATA_TYPE_NULL && type <= TSDB_DATA_TYPE_NCHAR;
}
//bool isNull(const char *val, int32_t type) {
// switch (type) {
// case TSDB_DATA_TYPE_BOOL:
// return *(uint8_t *)val == TSDB_DATA_BOOL_NULL;
// case TSDB_DATA_TYPE_TINYINT:
// return *(uint8_t *)val == TSDB_DATA_TINYINT_NULL;
// case TSDB_DATA_TYPE_SMALLINT:
// return *(uint16_t *)val == TSDB_DATA_SMALLINT_NULL;
// case TSDB_DATA_TYPE_INT:
// return *(uint32_t *)val == TSDB_DATA_INT_NULL;
// case TSDB_DATA_TYPE_BIGINT:
// case TSDB_DATA_TYPE_TIMESTAMP:
// return *(uint64_t *)val == TSDB_DATA_BIGINT_NULL;
// case TSDB_DATA_TYPE_FLOAT:
// return *(uint32_t *)val == TSDB_DATA_FLOAT_NULL;
// case TSDB_DATA_TYPE_DOUBLE:
// return *(uint64_t *)val == TSDB_DATA_DOUBLE_NULL;
// case TSDB_DATA_TYPE_NCHAR:
// return *(uint32_t*) varDataVal(val) == TSDB_DATA_NCHAR_NULL;
// case TSDB_DATA_TYPE_BINARY:
// return *(uint8_t *) varDataVal(val) == TSDB_DATA_BINARY_NULL;
// default:
// return false;
// };
//}
void setVardataNull(char* val, int32_t type) {
if (type == TSDB_DATA_TYPE_BINARY) {
varDataSetLen(val, sizeof(int8_t));
@ -433,14 +407,10 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
*(uint64_t *)(val + i * tDataTypeDesc[type].nSize) = TSDB_DATA_DOUBLE_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR: // todo : without length?
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint32_t *)(val + i * bytes) = TSDB_DATA_NCHAR_NULL;
}
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
for (int32_t i = 0; i < numOfElems; ++i) {
*(uint8_t *)(val + i * bytes) = TSDB_DATA_BINARY_NULL;
setVardataNull(val + i * bytes, type);
}
break;
default: {

View File

@ -108,7 +108,7 @@ void tVariantCreateFromBinary(tVariant *pVar, const char *pz, size_t len, uint32
break;
}
case TSDB_DATA_TYPE_BINARY: { // todo refactor, extract a method
pVar->pz = calloc(len, sizeof(char));
pVar->pz = calloc(len + 1, sizeof(char));
memcpy(pVar->pz, pz, len);
pVar->nLen = (int32_t)len;
break;
@ -125,7 +125,7 @@ void tVariantDestroy(tVariant *pVar) {
if (pVar == NULL) return;
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
taosTFree(pVar->pz);
tfree(pVar->pz);
pVar->nLen = 0;
}
@ -144,21 +144,24 @@ void tVariantDestroy(tVariant *pVar) {
void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
*pDst = *pSrc;
pDst->nType = pSrc->nType;
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
int32_t len = pSrc->nLen + 1;
if (pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
len = len * TSDB_NCHAR_SIZE;
}
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char* p = realloc(pDst->pz, len);
assert(p);
pDst->pz = calloc(1, len);
memcpy(pDst->pz, pSrc->pz, len);
memset(p, 0, len);
pDst->pz = p;
memcpy(pDst->pz, pSrc->pz, pSrc->nLen);
pDst->nLen = pSrc->nLen;
return;
}
// this is only for string array
if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) {
if (pSrc->nType >= TSDB_DATA_TYPE_BOOL && pSrc->nType <= TSDB_DATA_TYPE_DOUBLE) {
pDst->i64Key = pSrc->i64Key;
} else if (pSrc->nType == TSDB_DATA_TYPE_ARRAY) { // this is only for string array
size_t num = taosArrayGetSize(pSrc->arr);
pDst->arr = taosArrayInit(num, sizeof(char*));
for(size_t i = 0; i < num; i++) {
@ -166,11 +169,11 @@ void tVariantAssign(tVariant *pDst, const tVariant *pSrc) {
char* n = strdup(p);
taosArrayPush(pDst->arr, &n);
}
return;
}
pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
if (pDst->nType != TSDB_DATA_TYPE_ARRAY) {
pDst->nLen = tDataTypeDesc[pDst->nType].nSize;
}
}
int32_t tVariantCompare(const tVariant* p1, const tVariant* p2) {

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace TDengineDriver
{
enum TDengineDataType {
TSDB_DATA_TYPE_NULL = 0, // 1 byte
TSDB_DATA_TYPE_BOOL = 1, // 1 byte
TSDB_DATA_TYPE_TINYINT = 2, // 1 byte
TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes
TSDB_DATA_TYPE_INT = 4, // 4 bytes
TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes
TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
}
enum TDengineInitOption
{
TSDB_OPTION_LOCALE = 0,
TSDB_OPTION_CHARSET = 1,
TSDB_OPTION_TIMEZONE = 2,
TDDB_OPTION_CONFIGDIR = 3,
TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4
}
class TDengineMeta
{
public string name;
public short size;
public byte type;
public string TypeName()
{
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
return "DOUBLE";
case TDengineDataType.TSDB_DATA_TYPE_BINARY:
return "STRING";
case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP:
return "TIMESTAMP";
case TDengineDataType.TSDB_DATA_TYPE_NCHAR:
return "NCHAR";
default:
return "undefine";
}
}
}
class TDengine
{
public const int TSDB_CODE_SUCCESS = 0;
[DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)]
static extern public void Init();
[DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)]
static extern public void Cleanup();
[DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)]
static extern public void Options(int option, string value);
[DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Connect(string ip, string user, string password, string db, short port);
[DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_errstr(IntPtr res);
static public string Error(IntPtr res)
{
IntPtr errPtr = taos_errstr(res);
return Marshal.PtrToStringAnsi(errPtr);
}
[DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)]
static extern public int ErrorNo(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr Query(IntPtr conn, string sqlstr);
[DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)]
static extern public int AffectRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)]
static extern public int FieldCount(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)]
static extern private IntPtr taos_fetch_fields(IntPtr res);
static public List<TDengineMeta> FetchFields(IntPtr res)
{
const int fieldSize = 68;
List<TDengineMeta> metas = new List<TDengineMeta>();
if (res == IntPtr.Zero)
{
return metas;
}
int fieldCount = FieldCount(res);
IntPtr fieldsPtr = taos_fetch_fields(res);
for (int i = 0; i < fieldCount; ++i)
{
int offset = i * fieldSize;
TDengineMeta meta = new TDengineMeta();
meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset);
meta.type = Marshal.ReadByte(fieldsPtr + offset + 65);
meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66);
metas.Add(meta);
}
return metas;
}
[DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FetchRows(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr FreeResult(IntPtr res);
[DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
}
}

@ -0,0 +1 @@
Subproject commit b62a26ecc164a310104df57691691b237e091c89

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.0-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.14-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,14 +5,13 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.6</version>
<version>2.0.14</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
@ -50,17 +49,29 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.5</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.58</version>
</dependency>
</dependencies>
<build>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<version>2.0.14</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -56,6 +56,23 @@
<scope>test</scope>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.8</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.9</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.58</version>
</dependency>
</dependencies>
<build>
<plugins>
@ -95,6 +112,13 @@
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<includes>
<include>**/*Test.java</include>
</includes>
<excludes>
<exclude>**/BatchInsertTest.java</exclude>
<exclude>**/FailOverTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>

View File

@ -0,0 +1,161 @@
package com.taosdata.jdbc;
import java.io.*;
import java.sql.Driver;
import java.sql.DriverPropertyInfo;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
public abstract class AbstractTaosDriver implements Driver {
private static final String TAOS_CFG_FILENAME = "taos.cfg";
/**
* @param cfgDirPath the configuration directory passed in by the caller; may be null
* @return the config directory to load taos.cfg from
**/
protected File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return the default config directory, or null if no default config directory exists
*/
protected File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
protected List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
protected void loadTaosConfig(Properties info) {
if ((info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null ||
info.getProperty(TSDBDriver.PROPERTY_KEY_HOST).isEmpty()) && (
info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null ||
info.getProperty(TSDBDriver.PROPERTY_KEY_PORT).isEmpty())) {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> TAOS_CFG_FILENAME.equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
}
}
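The fallback above only fires when neither host nor port is supplied, and then copies the first firstEp/secondEp entry found in taos.cfg into the connection properties. A minimal caller-side sketch of that behaviour follows; the config path, endpoint and credentials are illustrative assumptions, not values taken from this commit:
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public class CfgFallbackSketch {
    public static void main(String[] args) throws Exception {
        // No host/port given, so the driver is expected to fill them in from the
        // firstEp/secondEp lines of taos.cfg (e.g. "firstEp  tdengine01:6030").
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");            // placeholder
        props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");    // placeholder
        props.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos"); // assumed path
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://:/", props)) {
            System.out.println("connected via the endpoint taken from taos.cfg");
        }
    }
}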
protected DriverPropertyInfo[] getPropertyInfo(Properties info) {
DriverPropertyInfo hostProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_HOST, info.getProperty(TSDBDriver.PROPERTY_KEY_HOST));
hostProp.required = false;
hostProp.description = "Hostname";
DriverPropertyInfo portProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PORT, info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
portProp.description = "Port";
DriverPropertyInfo dbProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_DBNAME, info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
DriverPropertyInfo userProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_USER, info.getProperty(TSDBDriver.PROPERTY_KEY_USER));
userProp.required = true;
userProp.description = "User";
DriverPropertyInfo passwordProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PASSWORD, info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
passwordProp.description = "Password";
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
return propertyInfo;
}
protected Properties parseURL(String url, Properties defaults) {
Properties urlProps = (defaults != null) ? defaults : new Properties();
// parse properties
int beginningOfSlashes = url.indexOf("//");
int index = url.indexOf("?");
if (index != -1) {
String paramString = url.substring(index + 1, url.length());
url = url.substring(0, index);
StringTokenizer queryParams = new StringTokenizer(paramString, "&");
while (queryParams.hasMoreElements()) {
String parameterValuePair = queryParams.nextToken();
int indexOfEqual = parameterValuePair.indexOf("=");
String parameter = null;
String value = null;
if (indexOfEqual != -1) {
parameter = parameterValuePair.substring(0, indexOfEqual);
if (indexOfEqual + 1 < parameterValuePair.length()) {
value = parameterValuePair.substring(indexOfEqual + 1);
}
}
if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) {
urlProps.setProperty(parameter, value);
}
}
}
// parse Product Name
String dbProductName = url.substring(0, beginningOfSlashes);
dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
// parse dbname
url = url.substring(beginningOfSlashes + 2);
int indexOfSlash = url.indexOf("/");
if (indexOfSlash != -1) {
if (indexOfSlash + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1));
}
url = url.substring(0, indexOfSlash);
}
// parse port
int indexOfColon = url.indexOf(":");
if (indexOfColon != -1) {
if (indexOfColon + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1));
}
url = url.substring(0, indexOfColon);
}
// parse host
if (url != null && url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
return urlProps;
}
}

View File

@ -16,10 +16,10 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
int colType = 0;
String colName = null;
int colSize = -1;
int colIndex = 0;
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
public int getColSize() {
return colSize;

View File

@ -14,7 +14,6 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.*;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
@ -35,11 +34,10 @@ import java.util.*;
import java.util.concurrent.Executor;
public class TSDBConnection implements Connection {
protected Properties props = null;
private TSDBJNIConnector connector = null;
protected Properties props = null;
private String catalog = null;
private TSDBDatabaseMetaData dbMetaData = null;
@ -48,15 +46,20 @@ public class TSDBConnection implements Connection {
private int timeoutMilliseconds = 0;
private String tsCharSet = "";
private boolean batchFetch = false;
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME),
info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
if (batchLoad != null) {
this.batchFetch = Boolean.parseBoolean(batchLoad);
}
}
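The constructor above enables block-wise fetching only when the "batchfetch" property (TSDBDriver.PROPERTY_KEY_BATCH_LOAD) parses as true, so turning it on is a one-line change for the caller. A hedged usage sketch; the address, credentials and query below are placeholders, not part of this commit:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public class BatchFetchSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");         // placeholder
        props.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); // placeholder
        // Read by the TSDBConnection constructor above; "true" is intended to make the
        // result set pull whole blocks via fetchBlock() instead of fetching row by row.
        props.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:6030/log", props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from dn limit 10")) { // placeholder query
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}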
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
@ -197,12 +200,14 @@ public class TSDBConnection implements Connection {
}
public SQLWarning getWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
//todo: implement getWarnings according to the warning messages returned from TDengine
return null;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void clearWarnings() throws SQLException {
// left blank to support HikariCP connection
//todo: implement getWarnings according to the warning messages returned from TDengine
//todo: implement clearWarnings according to the warning messages returned from TDengine
}
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
@ -223,6 +228,14 @@ public class TSDBConnection implements Connection {
return this.prepareStatement(sql);
}
public Boolean getBatchFetch() {
return this.batchFetch;
}
public void setBatchFetch(Boolean batchFetch) {
this.batchFetch = batchFetch;
}
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}

View File

@ -96,7 +96,7 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
}
public int getDriverMajorVersion() {
return 0;
return 2;
}
public int getDriverMinorVersion() {
@ -587,7 +587,6 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
/** add by zyyang **********/
Statement stmt = null;
if (null != conn && !conn.isClosed()) {

View File

@ -14,13 +14,8 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.*;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.*;
import java.util.logging.Logger;
/**
@ -42,77 +37,59 @@ import java.util.logging.Logger;
* register it with the DriverManager. This means that a user can load and
* register a driver by doing Class.forName("foo.bah.Driver")
*/
public class TSDBDriver implements java.sql.Driver {
public class TSDBDriver extends AbstractTaosDriver {
@Deprecated
private static final String URL_PREFIX1 = "jdbc:TSDB://";
private static final String URL_PREFIX = "jdbc:TAOS://";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
/**
* Key used to retrieve the host value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_HOST = "host";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key used to retrieve the port number value from the properties instance
* passed to the driver.
*/
public static final String PROPERTY_KEY_PORT = "port";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
/**
* Key used to retrieve the user value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_USER = "user";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key for the configuration file directory of TSDB client in properties instance
*/
public static final String PROPERTY_KEY_CONFIG_DIR = "cfgdir";
/**
* Key for the timezone used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_TIME_ZONE = "timezone";
/**
* Key for the locale used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_LOCALE = "locale";
/**
* Key for the char encoding used by the TSDB client in properties instance
*/
public static final String PROPERTY_KEY_CHARSET = "charset";
public static final String PROPERTY_KEY_PROTOCOL = "protocol";
/**
* Index for port coming out of parseHostPortPair().
* fetch data from the native interface in batch mode
*/
public final static int PORT_NUMBER_INDEX = 1;
/**
* Index for host coming out of parseHostPortPair().
*/
public final static int HOST_NAME_INDEX = 0;
public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch";
private TSDBDatabaseMetaData dbMetaData = null;
@ -124,74 +101,23 @@ public class TSDBDriver implements java.sql.Driver {
}
}
private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
/**
* @param cfgDirPath the configuration directory passed in by the caller; may be null
* @return the config directory to load taos.cfg from
**/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return the default config directory, or null if no default config directory exists
*/
private File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
}
if (!acceptsURL(url))
return null;
Properties props = null;
if ((props = parseURL(url, info)) == null) {
return null;
}
//load taos.cfg start
if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null) {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
}
loadTaosConfig(info);
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
(String) props.get(PROPERTY_KEY_TIME_ZONE));
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
(String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLWarning sqlWarning) {
@ -208,43 +134,15 @@ public class TSDBDriver implements java.sql.Driver {
}
/**
* Parses hostPortPair in the form of [host][:port] into an array, with the
* element of index HOST_NAME_INDEX being the host (or null if not specified),
* and the element of index PORT_NUMBER_INDEX being the port (or null if not
* specified).
*
* @param hostPortPair host and port in form of of [host][:port]
* @return array containing host and port as Strings
* @throws SQLException if a parse error occurs
* @param url the URL of the database
* @return <code>true</code> if this driver understands the given URL;
* <code>false</code> otherwise
* @throws SQLException if a database access error occurs or the url is {@code null}
*/
protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
String[] splitValues = new String[2];
int portIndex = hostPortPair.indexOf(":");
String hostname = null;
if (portIndex != -1) {
if ((portIndex + 1) < hostPortPair.length()) {
String portAsString = hostPortPair.substring(portIndex + 1);
hostname = hostPortPair.substring(0, portIndex);
splitValues[HOST_NAME_INDEX] = hostname;
splitValues[PORT_NUMBER_INDEX] = portAsString;
} else {
throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
}
} else {
splitValues[HOST_NAME_INDEX] = hostPortPair;
splitValues[PORT_NUMBER_INDEX] = null;
}
return splitValues;
}
public boolean acceptsURL(String url) throws SQLException {
return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
if (url == null)
throw new SQLException(TSDBConstants.WrapErrMsg("url is null"));
return (url != null && url.length() > 0 && url.trim().length() > 0) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1));
}
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
@ -252,133 +150,80 @@ public class TSDBDriver implements java.sql.Driver {
info = new Properties();
}
if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
if (acceptsURL(url)) {
info = parseURL(url, info);
}
DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
hostProp.required = true;
DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
userProp.required = true;
DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
return propertyInfo;
return getPropertyInfo(info);
}
/**
* example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
* example: jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password
*/
public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
@Override
public Properties parseURL(String url, Properties defaults) {
Properties urlProps = (defaults != null) ? defaults : new Properties();
if (url == null) {
if (url == null || url.length() <= 0 || url.trim().length() <= 0)
return null;
}
if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) {
if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1))
return null;
}
// parse properties
String urlForMeta = url;
String dbProductName = url.substring(url.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
int beginningOfSlashes = url.indexOf("//");
int index = url.indexOf("?");
if (index != -1) {
String paramString = url.substring(index + 1, url.length());
url = url.substring(0, index);
StringTokenizer queryParams = new StringTokenizer(paramString, "&");
while (queryParams.hasMoreElements()) {
String oneToken = queryParams.nextToken();
String[] pair = oneToken.split("=");
if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) {
urlProps.setProperty(pair[0].trim(), pair[1].trim());
}
}
}
// parse Product Name
String dbProductName = url.substring(0, beginningOfSlashes);
dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
// parse database name
url = url.substring(beginningOfSlashes + 2);
String host = url.substring(0, url.indexOf(":"));
url = url.substring(url.indexOf(":") + 1);
urlProps.setProperty(PROPERTY_KEY_HOST, host);
String port = url.substring(0, url.indexOf("/"));
urlProps.setProperty(PROPERTY_KEY_PORT, port);
url = url.substring(url.indexOf("/") + 1);
if (url.indexOf("?") != -1) {
String dbName = url.substring(0, url.indexOf("?"));
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
url = url.trim().substring(url.indexOf("?") + 1);
} else {
// without user & password so return
if (!url.trim().isEmpty()) {
String dbName = url.trim();
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
int indexOfSlash = url.indexOf("/");
if (indexOfSlash != -1) {
if (indexOfSlash + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1));
}
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
return urlProps;
url = url.substring(0, indexOfSlash);
}
String user = "";
if (url.indexOf("&") == -1) {
String[] kvPair = url.trim().split("=");
if (kvPair.length == 2) {
setPropertyValue(urlProps, kvPair);
return urlProps;
// parse port
int indexOfColon = url.indexOf(":");
if (indexOfColon != -1) {
if (indexOfColon + 1 < url.length()) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1));
}
url = url.substring(0, indexOfColon);
}
String[] queryStrings = url.trim().split("&");
for (String queryStr : queryStrings) {
String[] kvPair = queryStr.trim().split("=");
if (kvPair.length < 2) {
continue;
}
setPropertyValue(urlProps, kvPair);
if (url != null && url.length() > 0 && url.trim().length() > 0) {
urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
}
user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
return urlProps;
}
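For reference, the URL shape shown in the javadoc above (jdbc:TAOS://host:port/dbname?key=value&...) maps onto the returned Properties roughly as sketched below; the concrete address, database and credentials are illustrative assumptions only:
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public class ParseUrlSketch {
    public static void main(String[] args) throws Exception {
        // Running "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata" through the
        // parseURL() logic above should yield host/port/dbname plus the query-string pairs.
        Properties parsed = new TSDBDriver().parseURL(
                "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata", new Properties());
        System.out.println(parsed.getProperty(TSDBDriver.PROPERTY_KEY_HOST));   // 127.0.0.1
        System.out.println(parsed.getProperty(TSDBDriver.PROPERTY_KEY_PORT));   // 6030
        System.out.println(parsed.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME)); // test
        System.out.println(parsed.getProperty(TSDBDriver.PROPERTY_KEY_USER));   // root
    }
}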
public void setPropertyValue(Properties property, String[] keyValuePair) {
switch (keyValuePair[0].toLowerCase()) {
case PROPERTY_KEY_USER:
property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
break;
case PROPERTY_KEY_PASSWORD:
property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
break;
case PROPERTY_KEY_TIME_ZONE:
property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
break;
case PROPERTY_KEY_LOCALE:
property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
break;
case PROPERTY_KEY_CHARSET:
property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
break;
case PROPERTY_KEY_CONFIG_DIR:
property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
break;
}
}
public int getMajorVersion() {
return 1;
return 2;
}
public int getMinorVersion() {
return 1;
return 0;
}
public boolean jdbcCompliant() {
@ -389,33 +234,4 @@ public class TSDBDriver implements java.sql.Driver {
return null;
}
/**
* Returns the host property
*
* @param props the java.util.Properties instance to retrieve the hostname from.
* @return the host
*/
public String host(Properties props) {
return props.getProperty(PROPERTY_KEY_HOST, "localhost");
}
/**
* Returns the port number property
*
* @param props the properties to get the port number from
* @return the port number
*/
public int port(Properties props) {
return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
}
/**
* Returns the database property from <code>props</code>
*
* @param props the Properties to look for the database property.
* @return the database name.
*/
public String database(Properties props) {
return props.getProperty(PROPERTY_KEY_DBNAME);
}
}

View File

@ -111,8 +111,8 @@ public class TSDBJNIConnector {
* @throws SQLException
*/
public long executeQuery(String sql) throws SQLException {
// close previous result set if the user forgets to invoke the
// free method to close previous result set.
if (!this.isResultsetClosed) {
freeResultSet(taosResultSetPointer);
}
@ -122,7 +122,7 @@ public class TSDBJNIConnector {
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
} catch (Exception e) {
e.printStackTrace();
this.freeResultSet(pSql);
this.freeResultSetImp(this.taos, pSql);
throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding"));
}
@ -131,7 +131,7 @@ public class TSDBJNIConnector {
affectedRows = -1;
String msg = this.getErrMsg(pSql);
this.freeResultSet(pSql);
this.freeResultSetImp(this.taos, pSql);
throw new SQLException(TSDBConstants.WrapErrMsg(msg), "", code);
}
@ -173,7 +173,7 @@ public class TSDBJNIConnector {
private native long getResultSetImp(long connection, long pSql);
public boolean isUpdateQuery(long pSql) {
return isUpdateQueryImp(this.taos, pSql) == 1? true:false;
return isUpdateQueryImp(this.taos, pSql) == 1 ? true : false;
}
private native long isUpdateQueryImp(long connection, long pSql);
@ -243,6 +243,11 @@ public class TSDBJNIConnector {
private native int fetchRowImp(long connection, long resultSet, TSDBResultSetRowData rowData);
public int fetchBlock(long resultSet, TSDBResultSetBlockData blockData) {
return this.fetchBlockImp(this.taos, resultSet, blockData);
}
private native int fetchBlockImp(long connection, long resultSet, TSDBResultSetBlockData blockData);
/**
* Execute close operation from C to release connection pointer by JNI
*
@ -274,7 +279,7 @@ public class TSDBJNIConnector {
* Consume a subscription
*/
long consume(long subscription) {
return this.consumeImp(subscription);
}
private native long consumeImp(long subscription);

View File

@ -47,10 +47,14 @@ public class TSDBResultSet implements ResultSet {
private List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>();
private TSDBResultSetRowData rowData;
private TSDBResultSetBlockData blockData;
private boolean batchFetch = false;
private boolean lastWasNull = false;
private final int COLUMN_INDEX_START_VALUE = 1;
private int rowIndex = 0;
public TSDBJNIConnector getJniConnector() {
return jniConnector;
}
@ -67,6 +71,14 @@ public class TSDBResultSet implements ResultSet {
this.resultSetPointer = resultSetPointer;
}
public void setBatchFetch(boolean batchFetch) {
this.batchFetch = batchFetch;
}
public Boolean getBatchFetch() {
return this.batchFetch;
}
public List<ColumnMetaData> getColumnMetaDataList() {
return columnMetaDataList;
}
@ -94,8 +106,8 @@ public class TSDBResultSet implements ResultSet {
public TSDBResultSet() {
}
public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException {
this.jniConnector = connecter;
public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException {
this.jniConnector = connector;
this.resultSetPointer = resultSetPointer;
int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
@ -107,6 +119,7 @@ public class TSDBResultSet implements ResultSet {
}
this.rowData = new TSDBResultSetRowData(this.columnMetaDataList.size());
this.blockData = new TSDBResultSetBlockData(this.columnMetaDataList, this.columnMetaDataList.size());
}
public <T> T unwrap(Class<T> iface) throws SQLException {
@ -118,21 +131,42 @@ public class TSDBResultSet implements ResultSet {
}
public boolean next() throws SQLException {
if (rowData != null) {
this.rowData.clear();
}
if (this.getBatchFetch()) {
if (this.blockData.forward()) {
return true;
}
int code = this.jniConnector.fetchBlock(this.resultSetPointer, this.blockData);
this.blockData.reset();
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
}
int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
} else {
return true;
} else {
if (rowData != null) {
this.rowData.clear();
}
int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData);
if (code == TSDBConstants.JNI_CONNECTION_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (code == TSDBConstants.JNI_RESULT_SET_NULL) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_RESULT_SET_NULL));
} else if (code == TSDBConstants.JNI_NUM_OF_FIELDS_0) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_NUM_OF_FIELDS_0));
} else if (code == TSDBConstants.JNI_FETCH_END) {
return false;
} else {
return true;
}
}
}
@ -155,21 +189,30 @@ public class TSDBResultSet implements ResultSet {
String res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getString(colIndex);
}
return res;
}
public boolean getBoolean(int columnIndex) throws SQLException {
boolean res = false;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
} else {
return this.blockData.getBoolean(colIndex);
}
return res;
}
@ -177,66 +220,91 @@ public class TSDBResultSet implements ResultSet {
byte res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (byte) this.blockData.getInt(colIndex);
}
return res;
}
public short getShort(int columnIndex) throws SQLException {
short res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (short) this.blockData.getInt(colIndex);
}
return res;
}
public int getInt(int columnIndex) throws SQLException {
int res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getInt(colIndex);
}
return res;
}
public long getLong(int columnIndex) throws SQLException {
long res = 0l;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getLong(colIndex);
}
return res;
}
public float getFloat(int columnIndex) throws SQLException {
float res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return (float) this.blockData.getDouble(colIndex);
}
return res;
}
public double getDouble(int columnIndex) throws SQLException {
double res = 0;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
}
return res;
} else {
return this.blockData.getDouble(colIndex);
}
return res;
}
/*
@ -249,25 +317,11 @@ public class TSDBResultSet implements ResultSet {
*/
@Deprecated
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
BigDecimal res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
}
return res;
return new BigDecimal(getLong(columnIndex));
}
public byte[] getBytes(int columnIndex) throws SQLException {
byte[] res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes();
}
return res;
return getString(columnIndex).getBytes();
}
public Date getDate(int columnIndex) throws SQLException {
@ -284,11 +338,15 @@ public class TSDBResultSet implements ResultSet {
Timestamp res = null;
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getTimestamp(colIndex);
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
if (!lastWasNull) {
res = this.rowData.getTimestamp(colIndex);
}
return res;
} else {
return this.blockData.getTimestamp(colIndex);
}
return res;
}
public InputStream getAsciiStream(int columnIndex) throws SQLException {
@ -400,8 +458,12 @@ public class TSDBResultSet implements ResultSet {
public Object getObject(int columnIndex) throws SQLException {
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
return this.rowData.get(colIndex);
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
return this.rowData.get(colIndex);
} else {
return this.blockData.get(colIndex);
}
}
public Object getObject(String columnLabel) throws SQLException {
@ -433,8 +495,12 @@ public class TSDBResultSet implements ResultSet {
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
int colIndex = getTrueColumnIndex(columnIndex);
this.lastWasNull = this.rowData.wasNull(colIndex);
return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
if (!this.getBatchFetch()) {
this.lastWasNull = this.rowData.wasNull(colIndex);
return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
} else {
return new BigDecimal(this.blockData.getLong(colIndex));
}
}
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {

View File

@ -0,0 +1,497 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.LongBuffer;
import java.nio.ShortBuffer;
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class TSDBResultSetBlockData {
private int numOfRows = 0;
private int rowIndex = 0;
private List<ColumnMetaData> columnMetaDataList;
private ArrayList<Object> colData = null;
public TSDBResultSetBlockData(List<ColumnMetaData> colMeta, int numOfCols) {
this.columnMetaDataList = colMeta;
this.colData = new ArrayList<Object>(numOfCols);
}
public TSDBResultSetBlockData() {
this.colData = new ArrayList<Object>();
}
public void clear() {
if (this.colData == null) {
return;
}
int size = this.colData.size();
this.colData.clear();
setNumOfCols(size);
}
public int getNumOfRows() {
return this.numOfRows;
}
public void setNumOfRows(int numOfRows) {
this.numOfRows = numOfRows;
}
public int getNumOfCols() {
return this.colData.size();
}
public void setNumOfCols(int numOfCols) {
this.colData = new ArrayList<Object>(numOfCols);
this.colData.addAll(Collections.nCopies(numOfCols, null));
}
public boolean hasMore() {
return this.rowIndex < this.numOfRows;
}
public boolean forward() {
if (this.rowIndex > this.numOfRows) {
return false;
}
return ((++this.rowIndex) < this.numOfRows);
}
public void reset() {
this.rowIndex = 0;
}
public void setBoolean(int col, boolean value) {
colData.set(col, value);
}
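// setByteArray stores one column's raw block, handed over by the JNI layer as a
// little-endian byte array, as a typed NIO buffer view chosen by the column's TSDB type.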
public void setByteArray(int col, int length, byte[] value) {
try {
switch (this.columnMetaDataList.get(col).getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
ShortBuffer sb = buf.order(ByteOrder.LITTLE_ENDIAN).asShortBuffer();
this.colData.set(col, sb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_INT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
IntBuffer ib = buf.order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
this.colData.set(col, ib);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
this.colData.set(col, lb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
FloatBuffer fb = buf.order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer();
this.colData.set(col, fb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
DoubleBuffer db = buf.order(ByteOrder.LITTLE_ENDIAN).asDoubleBuffer();
this.colData.set(col, db);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
LongBuffer lb = buf.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
this.colData.set(col, lb);
break;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer buf = ByteBuffer.wrap(value, 0, length);
buf.order(ByteOrder.LITTLE_ENDIAN);
this.colData.set(col, buf);
break;
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
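// NULLs are encoded in the block with per-type sentinels: 0x2 for BOOL, the type's
// MIN_VALUE for TINYINT/SMALLINT/INT/BIGINT, NaN for FLOAT/DOUBLE, and an all-0xFF
// length prefix for BINARY/NCHAR. The helpers below detect these markers.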
private static class NullType {
private static final byte NULL_BOOL_VAL = 0x2;
private static final String NULL_STR = "null";
public String toString() {
return NullType.NULL_STR;
}
public static boolean isBooleanNull(byte val) {
return val == NullType.NULL_BOOL_VAL;
}
private static boolean isTinyIntNull(byte val) {
return val == Byte.MIN_VALUE;
}
private static boolean isSmallIntNull(short val) {
return val == Short.MIN_VALUE;
}
private static boolean isIntNull(int val) {
return val == Integer.MIN_VALUE;
}
private static boolean isBigIntNull(long val) {
return val == Long.MIN_VALUE;
}
private static boolean isFloatNull(float val) {
return Float.isNaN(val);
}
private static boolean isDoubleNull(double val) {
return Double.isNaN(val);
}
private static boolean isBinaryNull(byte[] val, int length) {
if (length != Byte.BYTES) {
return false;
}
// mask to an unsigned value: a signed byte never compares equal to 0xFF directly
return (val[0] & 0xFF) == 0xFF;
}
private static boolean isNcharNull(byte[] val, int length) {
if (length != Integer.BYTES) {
return false;
}
// mask to an unsigned value before comparing against the 0xFF null marker
return (val[0] & val[1] & val[2] & val[3] & 0xFF) == 0xFF;
}
}
/**
* The original column type may not be a string type, but it is converted to a
* string by calling this method.
*
* @param col column index
* @return the column value rendered as a string ("null" when the underlying value is NULL)
* @throws SQLException
*/
public String getString(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return new NullType().toString();
}
return obj.toString();
}
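// Note: a NULL cell comes back as the literal string "null" rather than a Java null
// reference; callers that must distinguish NULLs can check get(col) == null instead.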
public int getInt(int col) {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return ((Long) obj).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return ((Double) obj).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Integer.parseInt((String) obj);
}
}
return 0;
}
public boolean getBoolean(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return Boolean.FALSE;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return ((int) obj == 0L) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (((Long) obj) == 0L) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return (((Double) obj) == 0) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
if ("TRUE".compareToIgnoreCase((String) obj) == 0) {
return Boolean.TRUE;
} else if ("FALSE".compareToIgnoreCase((String) obj) == 0) {
return Boolean.FALSE;
} else {
throw new SQLDataException();
}
}
}
return Boolean.FALSE;
}
public long getLong(int col) throws SQLException {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (long) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return ((Double) obj).longValue();
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Long.parseLong((String) obj);
}
}
return 0;
}
public Timestamp getTimestamp(int col) {
try {
return new Timestamp(getLong(col));
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
public double getDouble(int col) {
Object obj = get(col);
if (obj == null) {
return 0;
}
int type = this.columnMetaDataList.get(col).getColType();
switch (type) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT: {
return (int) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
return (long) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
return (double) obj;
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
return Double.parseDouble((String) obj);
}
}
return 0;
}
public Object get(int col) {
int fieldSize = this.columnMetaDataList.get(col).getColSize();
switch (this.columnMetaDataList.get(col).getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
byte val = bb.get(this.rowIndex);
if (NullType.isBooleanNull(val)) {
return null;
}
return (val == 0x0) ? Boolean.FALSE : Boolean.TRUE;
}
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
byte val = bb.get(this.rowIndex);
if (NullType.isTinyIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
ShortBuffer sb = (ShortBuffer) this.colData.get(col);
short val = sb.get(this.rowIndex);
if (NullType.isSmallIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_INT: {
IntBuffer ib = (IntBuffer) this.colData.get(col);
int val = ib.get(this.rowIndex);
if (NullType.isIntNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
LongBuffer lb = (LongBuffer) this.colData.get(col);
long val = lb.get(this.rowIndex);
if (NullType.isBigIntNull(val)) {
return null;
}
return (long) val;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
FloatBuffer fb = (FloatBuffer) this.colData.get(col);
float val = fb.get(this.rowIndex);
if (NullType.isFloatNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
DoubleBuffer lb = (DoubleBuffer) this.colData.get(col);
double val = lb.get(this.rowIndex);
if (NullType.isDoubleNull(val)) {
return null;
}
return val;
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
bb.position(fieldSize * this.rowIndex);
int length = bb.getShort();
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isBinaryNull(dest, length)) {
return null;
}
return new String(dest);
}
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
ByteBuffer bb = (ByteBuffer) this.colData.get(col);
bb.position(fieldSize * this.rowIndex);
int length = bb.getShort();
byte[] dest = new byte[length];
bb.get(dest, 0, length);
if (NullType.isNcharNull(dest, length)) {
return null;
}
try {
String ss = TaosGlobalConfig.getCharset();
return new String(dest, ss);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
}
}
return 0;
}
}

View File

@ -218,5 +218,4 @@ public class TSDBResultSetRowData {
public void setData(ArrayList<Object> data) {
this.data = (ArrayList<Object>) data.clone();
}
}

View File

@ -19,7 +19,7 @@ import java.util.ArrayList;
import java.util.List;
public class TSDBStatement implements Statement {
private TSDBJNIConnector connecter = null;
private TSDBJNIConnector connector = null;
/**
* To store batched commands
@ -45,9 +45,9 @@ public class TSDBStatement implements Statement {
this.connection = connection;
}
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
this.connection = connection;
this.connecter = connecter;
this.connector = connector;
this.isClosed = false;
}
@ -65,25 +65,27 @@ public class TSDBStatement implements Statement {
}
// TODO make sure it is not an update query
pSql = this.connecter.executeQuery(sql);
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
// create/insert/update/delete/alter
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
return null;
}
if (!this.connecter.isUpdateQuery(pSql)) {
return new TSDBResultSet(this.connecter, resultSetPointer);
if (!this.connector.isUpdateQuery(pSql)) {
TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer);
res.setBatchFetch(this.connection.getBatchFetch());
return res;
} else {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
return null;
}
@ -95,28 +97,28 @@ public class TSDBStatement implements Statement {
}
// TODO check whether the current query is an update query
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
this.affectedRows = this.connecter.getAffectedRows(pSql);
this.connecter.freeResultSet(pSql);
this.affectedRows = this.connector.getAffectedRows(pSql);
this.connector.freeResultSet(pSql);
return this.affectedRows;
}
public String getErrorMsg(long pSql) {
return this.connecter.getErrMsg(pSql);
return this.connector.getErrMsg(pSql);
}
public void close() throws SQLException {
if (!isClosed) {
if (!this.connecter.isResultsetClosed()) {
this.connecter.freeResultSet();
if (!this.connector.isResultsetClosed()) {
this.connector.freeResultSet();
}
isClosed = true;
}
@ -136,7 +138,7 @@ public class TSDBStatement implements Statement {
}
public void setMaxRows(int max) throws SQLException {
// always set maxRows to zero, meaning unlimitted rows in a resultSet
// always set maxRows to zero, meaning unlimited rows in a resultSet
}
public void setEscapeProcessing(boolean enable) throws SQLException {
@ -172,15 +174,15 @@ public class TSDBStatement implements Statement {
throw new SQLException("Invalid method call on a closed statement.");
}
boolean res = true;
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
pSql = this.connector.executeQuery(sql);
long resultSetPointer = this.connector.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
// no result set is retrieved
this.connecter.freeResultSet(pSql);
this.connector.freeResultSet(pSql);
res = false;
}
@ -191,10 +193,10 @@ public class TSDBStatement implements Statement {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
long resultSetPointer = connecter.getResultSet();
long resultSetPointer = connector.getResultSet();
TSDBResultSet resSet = null;
if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
resSet = new TSDBResultSet(connecter, resultSetPointer);
resSet = new TSDBResultSet(connector, resultSetPointer);
}
return resSet;
}
@ -267,7 +269,7 @@ public class TSDBStatement implements Statement {
}
public Connection getConnection() throws SQLException {
if (this.connecter != null)
if (this.connector != null)
return this.connection;
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}

View File

@ -0,0 +1,334 @@
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import java.sql.*;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
public class RestfulConnection implements Connection {
private final String host;
private final int port;
private final Properties props;
private final String database;
private final String url;
public RestfulConnection(String host, String port, Properties props, String database, String url) {
this.host = host;
this.port = Integer.parseInt(port);
this.props = props;
this.database = database;
this.url = url;
}
@Override
public Statement createStatement() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed."));
return new RestfulStatement(this, database);
}
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return null;
}
@Override
public String nativeSQL(String sql) throws SQLException {
return null;
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
}
@Override
public boolean getAutoCommit() throws SQLException {
return false;
}
@Override
public void commit() throws SQLException {
}
@Override
public void rollback() throws SQLException {
}
@Override
public void close() throws SQLException {
}
@Override
public boolean isClosed() throws SQLException {
return false;
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
//TODO: RestfulDatabaseMetaData is not implemented
return new RestfulDatabaseMetaData();
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
}
@Override
public void setCatalog(String catalog) throws SQLException {
}
@Override
public String getCatalog() throws SQLException {
return null;
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
//transaction is not supported
throw new SQLFeatureNotSupportedException("transactions are not supported");
}
/**
* Transactions are not supported; this connection always reports
* {@link Connection#TRANSACTION_NONE}.
*/
@Override
public int getTransactionIsolation() throws SQLException {
//Connection.TRANSACTION_NONE specifies that transactions are not supported.
return Connection.TRANSACTION_NONE;
}
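// Since setTransactionIsolation(...) above always throws SQLFeatureNotSupportedException
// and this method reports TRANSACTION_NONE, callers should not route transactional
// work through the RESTful connection.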
@Override
public SQLWarning getWarnings() throws SQLException {
//TODO: getWarnings not implemented
return null;
}
@Override
public void clearWarnings() throws SQLException {
throw new SQLFeatureNotSupportedException("clearWarnings not supported.");
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
return null;
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
}
@Override
public void setHoldability(int holdability) throws SQLException {
}
@Override
public int getHoldability() throws SQLException {
return 0;
}
@Override
public Savepoint setSavepoint() throws SQLException {
return null;
}
@Override
public Savepoint setSavepoint(String name) throws SQLException {
return null;
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
return null;
}
@Override
public Clob createClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public Blob createBlob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public NClob createNClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public SQLXML createSQLXML() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public boolean isValid(int timeout) throws SQLException {
return false;
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
}
@Override
public String getClientInfo(String name) throws SQLException {
return null;
}
@Override
public Properties getClientInfo() throws SQLException {
return null;
}
@Override
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
}
@Override
public void setSchema(String schema) throws SQLException {
}
@Override
public String getSchema() throws SQLException {
return null;
}
@Override
public void abort(Executor executor) throws SQLException {
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
}
@Override
public int getNetworkTimeout() throws SQLException {
return 0;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
public String getHost() {
return host;
}
public int getPort() {
return port;
}
public Properties getProps() {
return props;
}
public String getDatabase() {
return database;
}
public String getUrl() {
return url;
}
}

View File

@ -0,0 +1,886 @@
package com.taosdata.jdbc.rs;
import java.sql.*;
public class RestfulDatabaseMetaData implements DatabaseMetaData {
@Override
public boolean allProceduresAreCallable() throws SQLException {
return false;
}
@Override
public boolean allTablesAreSelectable() throws SQLException {
return false;
}
@Override
public String getURL() throws SQLException {
return null;
}
@Override
public String getUserName() throws SQLException {
return null;
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedHigh() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedLow() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedAtStart() throws SQLException {
return false;
}
@Override
public boolean nullsAreSortedAtEnd() throws SQLException {
return false;
}
@Override
public String getDatabaseProductName() throws SQLException {
return null;
}
@Override
public String getDatabaseProductVersion() throws SQLException {
return null;
}
@Override
public String getDriverName() throws SQLException {
return null;
}
@Override
public String getDriverVersion() throws SQLException {
return null;
}
@Override
public int getDriverMajorVersion() {
return 0;
}
@Override
public int getDriverMinorVersion() {
return 0;
}
@Override
public boolean usesLocalFiles() throws SQLException {
return false;
}
@Override
public boolean usesLocalFilePerTable() throws SQLException {
return false;
}
@Override
public boolean supportsMixedCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesUpperCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesLowerCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesMixedCaseIdentifiers() throws SQLException {
return false;
}
@Override
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
@Override
public String getIdentifierQuoteString() throws SQLException {
return null;
}
@Override
public String getSQLKeywords() throws SQLException {
return null;
}
@Override
public String getNumericFunctions() throws SQLException {
return null;
}
@Override
public String getStringFunctions() throws SQLException {
return null;
}
@Override
public String getSystemFunctions() throws SQLException {
return null;
}
@Override
public String getTimeDateFunctions() throws SQLException {
return null;
}
@Override
public String getSearchStringEscape() throws SQLException {
return null;
}
@Override
public String getExtraNameCharacters() throws SQLException {
return null;
}
@Override
public boolean supportsAlterTableWithAddColumn() throws SQLException {
return false;
}
@Override
public boolean supportsAlterTableWithDropColumn() throws SQLException {
return false;
}
@Override
public boolean supportsColumnAliasing() throws SQLException {
return false;
}
@Override
public boolean nullPlusNonNullIsNull() throws SQLException {
return false;
}
@Override
public boolean supportsConvert() throws SQLException {
return false;
}
@Override
public boolean supportsConvert(int fromType, int toType) throws SQLException {
return false;
}
@Override
public boolean supportsTableCorrelationNames() throws SQLException {
return false;
}
@Override
public boolean supportsDifferentTableCorrelationNames() throws SQLException {
return false;
}
@Override
public boolean supportsExpressionsInOrderBy() throws SQLException {
return false;
}
@Override
public boolean supportsOrderByUnrelated() throws SQLException {
return false;
}
@Override
public boolean supportsGroupBy() throws SQLException {
return false;
}
@Override
public boolean supportsGroupByUnrelated() throws SQLException {
return false;
}
@Override
public boolean supportsGroupByBeyondSelect() throws SQLException {
return false;
}
@Override
public boolean supportsLikeEscapeClause() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleResultSets() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsNonNullableColumns() throws SQLException {
return false;
}
@Override
public boolean supportsMinimumSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsCoreSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsExtendedSQLGrammar() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92EntryLevelSQL() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92IntermediateSQL() throws SQLException {
return false;
}
@Override
public boolean supportsANSI92FullSQL() throws SQLException {
return false;
}
@Override
public boolean supportsIntegrityEnhancementFacility() throws SQLException {
return false;
}
@Override
public boolean supportsOuterJoins() throws SQLException {
return false;
}
@Override
public boolean supportsFullOuterJoins() throws SQLException {
return false;
}
@Override
public boolean supportsLimitedOuterJoins() throws SQLException {
return false;
}
@Override
public String getSchemaTerm() throws SQLException {
return null;
}
@Override
public String getProcedureTerm() throws SQLException {
return null;
}
@Override
public String getCatalogTerm() throws SQLException {
return null;
}
@Override
public boolean isCatalogAtStart() throws SQLException {
return false;
}
@Override
public String getCatalogSeparator() throws SQLException {
return null;
}
@Override
public boolean supportsSchemasInDataManipulation() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInProcedureCalls() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInTableDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInIndexDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInDataManipulation() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInProcedureCalls() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInTableDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
return false;
}
@Override
public boolean supportsPositionedDelete() throws SQLException {
return false;
}
@Override
public boolean supportsPositionedUpdate() throws SQLException {
return false;
}
@Override
public boolean supportsSelectForUpdate() throws SQLException {
return false;
}
@Override
public boolean supportsStoredProcedures() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInComparisons() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInExists() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInIns() throws SQLException {
return false;
}
@Override
public boolean supportsSubqueriesInQuantifieds() throws SQLException {
return false;
}
@Override
public boolean supportsCorrelatedSubqueries() throws SQLException {
return false;
}
@Override
public boolean supportsUnion() throws SQLException {
return false;
}
@Override
public boolean supportsUnionAll() throws SQLException {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
return false;
}
@Override
public int getMaxBinaryLiteralLength() throws SQLException {
return 0;
}
@Override
public int getMaxCharLiteralLength() throws SQLException {
return 0;
}
@Override
public int getMaxColumnNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInGroupBy() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInIndex() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInOrderBy() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInSelect() throws SQLException {
return 0;
}
@Override
public int getMaxColumnsInTable() throws SQLException {
return 0;
}
@Override
public int getMaxConnections() throws SQLException {
return 0;
}
@Override
public int getMaxCursorNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxIndexLength() throws SQLException {
return 0;
}
@Override
public int getMaxSchemaNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxProcedureNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxCatalogNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxRowSize() throws SQLException {
return 0;
}
@Override
public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
return false;
}
@Override
public int getMaxStatementLength() throws SQLException {
return 0;
}
@Override
public int getMaxStatements() throws SQLException {
return 0;
}
@Override
public int getMaxTableNameLength() throws SQLException {
return 0;
}
@Override
public int getMaxTablesInSelect() throws SQLException {
return 0;
}
@Override
public int getMaxUserNameLength() throws SQLException {
return 0;
}
@Override
public int getDefaultTransactionIsolation() throws SQLException {
return 0;
}
@Override
public boolean supportsTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
return false;
}
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
return false;
}
@Override
public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
return false;
}
@Override
public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
return false;
}
@Override
public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
return false;
}
@Override
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
return null;
}
@Override
public ResultSet getSchemas() throws SQLException {
return null;
}
@Override
public ResultSet getCatalogs() throws SQLException {
return null;
}
@Override
public ResultSet getTableTypes() throws SQLException {
return null;
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException {
return null;
}
@Override
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
return null;
}
@Override
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
return null;
}
@Override
public ResultSet getTypeInfo() throws SQLException {
return null;
}
@Override
public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException {
return null;
}
@Override
public boolean supportsResultSetType(int type) throws SQLException {
return false;
}
@Override
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
return false;
}
@Override
public boolean ownUpdatesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean ownDeletesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean ownInsertsAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersUpdatesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersDeletesAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean othersInsertsAreVisible(int type) throws SQLException {
return false;
}
@Override
public boolean updatesAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean deletesAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean insertsAreDetected(int type) throws SQLException {
return false;
}
@Override
public boolean supportsBatchUpdates() throws SQLException {
return false;
}
@Override
public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException {
return null;
}
@Override
public Connection getConnection() throws SQLException {
return null;
}
@Override
public boolean supportsSavepoints() throws SQLException {
return false;
}
@Override
public boolean supportsNamedParameters() throws SQLException {
return false;
}
@Override
public boolean supportsMultipleOpenResults() throws SQLException {
return false;
}
@Override
public boolean supportsGetGeneratedKeys() throws SQLException {
return false;
}
@Override
public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException {
return null;
}
@Override
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
return false;
}
@Override
public int getResultSetHoldability() throws SQLException {
return 0;
}
@Override
public int getDatabaseMajorVersion() throws SQLException {
return 0;
}
@Override
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
@Override
public int getJDBCMajorVersion() throws SQLException {
return 0;
}
@Override
public int getJDBCMinorVersion() throws SQLException {
return 0;
}
@Override
public int getSQLStateType() throws SQLException {
return 0;
}
@Override
public boolean locatorsUpdateCopy() throws SQLException {
return false;
}
@Override
public boolean supportsStatementPooling() throws SQLException {
return false;
}
@Override
public RowIdLifetime getRowIdLifetime() throws SQLException {
return null;
}
@Override
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
}
@Override
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
return false;
}
@Override
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
return false;
}
@Override
public ResultSet getClientInfoProperties() throws SQLException {
return null;
}
@Override
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
return null;
}
@Override
public boolean generatedKeyAlwaysReturned() throws SQLException {
return false;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

Some files were not shown because too many files have changed in this diff.