Merge branch 'develop' of https://github.com/taosdata/TDengine into develop

commit ddc6748276
@@ -19,6 +19,7 @@ SET(TD_MEM_CHECK FALSE)
SET(TD_PAGMODE_LITE FALSE)
SET(TD_SOMODE_STATIC FALSE)
SET(TD_POWER FALSE)
SET(TD_GODLL FALSE)

SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})

README.md (74 lines changed)
@@ -29,24 +29,69 @@ For user manual, system design and architecture, engineering blogs, refer to [TD

# Building

At the moment, TDengine only supports building and running on Linux systems. You can choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) or from the source code. This quick guide is for installation from the source only.

To build TDengine, use [CMake](https://cmake.org/) 2.8 or higher versions in the project directory. Install CMake for example on Ubuntu:
```
sudo apt-get install -y cmake build-essential
```
To build TDengine, use [CMake](https://cmake.org/) 3.5 or higher versions in the project directory.

## Install tools

### Ubuntu & Debian:
```bash
sudo apt-get install -y gcc cmake build-essential git
```

To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed.
To install openjdk-8 on Ubuntu:
```
sudo apt-get install openjdk-8-jdk
```
To install Apache Maven on Ubuntu:
```
sudo apt-get install maven
```
To install openjdk-8:
```bash
sudo apt-get install -y openjdk-8-jdk
```

Build TDengine:

To install Apache Maven:
```bash
sudo apt-get install -y maven
```

### CentOS 7:
```bash
sudo yum install -y gcc gcc-c++ make cmake3 epel-release git
sudo yum remove -y cmake
sudo ln -s /usr/bin/cmake3 /usr/bin/cmake
```

To install openjdk-8:
```bash
sudo yum install -y java-1.8.0-openjdk
```

To install Apache Maven:
```bash
sudo yum install -y maven
```

### CentOS 8 & Fedora:
```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git
```

To install openjdk-8:
```bash
sudo dnf install -y java-1.8.0-openjdk
```

To install Apache Maven:
```bash
sudo dnf install -y maven
```

## Get the source codes

- github:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```

## Build TDengine

```bash
mkdir debug && cd debug
cmake .. && cmake --build .
```

@@ -54,12 +99,12 @@ cmake .. && cmake --build .

To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:

aarch64:
```cmd
```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```

aarch32:
```cmd
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```

@@ -124,6 +169,7 @@ The TDengine community has also kindly built some of their own connectors! Follo

- [Rust Connector](https://github.com/taosdata/TDengine/tree/master/tests/examples/rust)
- [.Net Core Connector](https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos)
- [Lua Connector](https://github.com/taosdata/TDengine/tree/develop/tests/examples/lua)

# How to run the test cases and how to add a new test case?
TDengine's test framework and all test cases are fully open source.

@@ -17,6 +17,10 @@ IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()

IF (TD_POWER)
ADD_DEFINITIONS(-D_TD_POWER_)
ENDIF ()

IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()

@@ -27,6 +27,11 @@ IF (${SOMODE} MATCHES "static")
MESSAGE(STATUS "Link so using static mode")
ENDIF ()

IF (${DBNAME} MATCHES "power")
SET(TD_POWER TRUE)
MESSAGE(STATUS "power is true")
ENDIF ()

IF (${DLLTYPE} MATCHES "go")
SET(TD_GODLL TRUE)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})

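For illustration only: the DBNAME and DLLTYPE cache variables tested above are passed on the CMake command line. A hedged sketch of such an invocation (the values are simply the strings matched above, not an officially documented recipe):

```bash
mkdir -p debug && cd debug
# DBNAME=power turns on TD_POWER, DLLTYPE=go turns on TD_GODLL (see the CMake checks above)
cmake .. -DDBNAME=power -DDLLTYPE=go
cmake --build .
```
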
@@ -2,9 +2,14 @@ IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
IF (TD_POWER)
SET(CMAKE_INSTALL_PREFIX C:/PowerDB)
ELSE ()
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
ENDIF ()

IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)

@@ -15,7 +20,13 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)

IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
ENDIF ()

#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)

@@ -29,5 +40,5 @@ ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin ${TD_VER_NUMBER})")
ENDIF ()

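The INSTALL(CODE ...) rules above hook packaging/tools/make_install.sh into CMake's install step. A minimal sketch, assuming a Linux build in a `debug` directory with the default Makefile generator (as in the README section above):

```bash
cd debug
cmake .. && cmake --build .
# the install target runs make_install.sh through the INSTALL(CODE ...) rules shown above
sudo make install
```
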
@ -52,7 +52,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
|
|||
## [连接器](https://www.taosdata.com/cn/documentation20/connector)
|
||||
|
||||
- [C/C++ Connector](https://www.taosdata.com/cn/documentation20/connector/#C/C++-Connector):通过libtaos客户端的库,连接TDengine服务器的主要方法
|
||||
- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector/#Java-Connector):通过标准的JDBC API,给Java应用提供到TDengine的连接
|
||||
- [Java Connector(JDBC)](https://www.taosdata.com/cn/documentation20/connector-java):通过标准的JDBC API,给Java应用提供到TDengine的连接
|
||||
- [Python Connector](https://www.taosdata.com/cn/documentation20/connector/#Python-Connector):给Python应用提供一个连接TDengine服务器的驱动
|
||||
- [RESTful Connector](https://www.taosdata.com/cn/documentation20/connector/#RESTful-Connector):提供一种最简单的连接TDengine服务器的方式
|
||||
- [Go Connector](https://www.taosdata.com/cn/documentation20/connector/#Go-Connector):给Go应用提供一个连接TDengine服务器的驱动
|
||||
|
|
|
@ -42,7 +42,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
|
|||
|
||||
| | 类型 | Bytes | 说明 |
|
||||
| ---- | :-------: | ------ | ------------------------------------------------------------ |
|
||||
| 1 | TIMESTAMP | 8 | 时间戳。最小精度毫秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
|
||||
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 |
|
||||
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31用作Null |
|
||||
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63用于NULL |
|
||||
| 4 | FLOAT | 4 | 浮点型,有效位数6-7,范围 [-3.4E38, 3.4E38] |
|
||||
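A small illustration of the types listed above, using the document's own shell-transcript style (database, table, and column names are hypothetical):

```
taos> create database if not exists demo;
taos> use demo;
taos> create table sensor_tb (ts timestamp, code int, total bigint, temperature float);
```
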
|
@ -416,7 +416,7 @@ taos> SELECT database();
|
|||
power |
|
||||
Query OK, 1 row(s) in set (0.000079s)
|
||||
```
|
||||
如果登录的时候没有指定默认数据库,且没有使用```use``命令切换数据,则返回NULL。
|
||||
如果登录的时候没有指定默认数据库,且没有使用```use```命令切换数据,则返回NULL。
|
||||
```
|
||||
taos> SELECT database();
|
||||
database() |
|
||||
|
@ -503,10 +503,10 @@ Query OK, 1 row(s) in set (0.001091s)
|
|||
| % | match with any char sequences | **`binary`** **`nchar`** |
|
||||
| _ | match with a single char | **`binary`** **`nchar`** |
|
||||
|
||||
1. 同时进行多个字段的范围过滤需要使用关键词AND进行连接不同的查询条件,暂不支持OR连接的查询条件。
|
||||
2. 针对某一字段的过滤只支持单一区间的过滤条件。例如:value>20 and value<30是合法的过滤条件, 而Value<20 AND value<>5是非法的过滤条件。
|
||||
1. 同时进行多个字段的范围过滤需要使用关键词AND进行连接不同的查询条件,暂不支持OR连接的不同列之间的查询过滤条件。
|
||||
2. 针对某一字段的过滤只支持单一时间区间过滤条件。但是针对其他的(普通)列或标签列,可以使用``` OR``` 条件进行组合条件的查询过滤。例如:((value > 20 and value < 30) OR (value < 12)) 。
|
||||
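A hedged illustration of the combined-condition rule in item 2 above (the column name value follows the example in the text; the table name is hypothetical):

```
taos> select * from sensor_tb where (value > 20 and value < 30) or (value < 12);
```
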
|
||||
### Some Examples
|
||||
### SQL 示例
|
||||
|
||||
- 对于下面的例子,表tb1用以下语句创建
|
||||
|
||||
|
|
|
@ -280,365 +280,10 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
|
|||
取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
|
||||
|
||||
|
||||
## Java Connector
|
||||
|
||||
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
|
||||
|
||||
由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
|
||||
* libtaos.so
|
||||
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
|
||||
* taos.dll
|
||||
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
|
||||
> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
|
||||
|
||||
TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
|
||||
|
||||
* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
|
||||
* 由于不支持删除和修改,所以也不支持事务操作。
|
||||
* 目前不支持表间的 union 操作。
|
||||
* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
|
||||
|
||||
|
||||
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||
|
||||
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
||||
| --- | --- | --- |
|
||||
| 2.0.2 | 2.0.0.x 及以上 | 1.8.x |
|
||||
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
|
||||
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
|
||||
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
|
||||
|
||||
## TDengine DataType 和 Java DataType
|
||||
|
||||
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
|
||||
|
||||
| TDengine DataType | Java DataType |
|
||||
| --- | --- |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT, TINYINT |java.lang.Short |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY, NCHAR | java.lang.String |
|
||||
|
||||
## 如何获取 TAOS-JDBCDriver
|
||||
|
||||
### maven 仓库
|
||||
|
||||
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
|
||||
* [sonatype][8]
|
||||
* [mvnrepository][9]
|
||||
* [maven.aliyun][10]
|
||||
|
||||
maven 项目中使用如下 pom.xml 配置即可:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
### 源码编译打包
|
||||
|
||||
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
|
||||
|
||||
|
||||
## 使用说明
|
||||
|
||||
### 获取连接
|
||||
|
||||
如下所示配置即可获取 TDengine Connection:
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。
|
||||
|
||||
TDengine 的 JDBC URL 规范格式为:
|
||||
`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
|
||||
|
||||
其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下:
|
||||
|
||||
* user:登录 TDengine 用户名,默认值 root。
|
||||
* password:用户登录密码,默认值 taosdata。
|
||||
* charset:客户端使用的字符集,默认值为系统字符集。
|
||||
* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
|
||||
* locale:客户端语言环境,默认值系统当前 locale。
|
||||
* timezone:客户端使用的时区,默认值为系统当前时区。
|
||||
|
||||
以上参数可以在 3 处配置,`优先级由高到低`分别如下:
|
||||
1. JDBC URL 参数
|
||||
如上所述,可以在 JDBC URL 的参数中指定。
|
||||
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
|
||||
3. 客户端配置文件 taos.cfg
|
||||
|
||||
linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。
|
||||
```properties
|
||||
# client default username
|
||||
# defaultUser root
|
||||
|
||||
# client default password
|
||||
# defaultPass taosdata
|
||||
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
```
|
||||
> 更多详细配置请参考[客户端配置][13]
|
||||
|
||||
### 创建数据库和表
|
||||
|
||||
```java
|
||||
Statement stmt = conn.createStatement();
|
||||
|
||||
// create database
|
||||
stmt.executeUpdate("create database if not exists db");
|
||||
|
||||
// use database
|
||||
stmt.executeUpdate("use db");
|
||||
|
||||
// create table
|
||||
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
|
||||
```
|
||||
> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
|
||||
|
||||
### 插入数据
|
||||
|
||||
```java
|
||||
// insert data
|
||||
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
|
||||
|
||||
System.out.println("insert " + affectedRows + " rows.");
|
||||
```
|
||||
> now 为系统内部函数,默认为服务器当前时间。
|
||||
> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
|
||||
|
||||
### 查询数据
|
||||
|
||||
```java
|
||||
// query data
|
||||
ResultSet resultSet = stmt.executeQuery("select * from tb");
|
||||
|
||||
Timestamp ts = null;
|
||||
int temperature = 0;
|
||||
float humidity = 0;
|
||||
while(resultSet.next()){
|
||||
|
||||
ts = resultSet.getTimestamp(1);
|
||||
temperature = resultSet.getInt(2);
|
||||
humidity = resultSet.getFloat("humidity");
|
||||
|
||||
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
|
||||
}
|
||||
```
|
||||
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
||||
|
||||
|
||||
### 订阅
|
||||
|
||||
#### 创建
|
||||
|
||||
```java
|
||||
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
|
||||
```
|
||||
|
||||
`subscribe` 方法的三个参数含义如下:
|
||||
|
||||
* topic:订阅的主题(即名称),此参数是订阅的唯一标识
|
||||
* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
|
||||
* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
|
||||
|
||||
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
|
||||
|
||||
#### 消费数据
|
||||
|
||||
```java
|
||||
int total = 0;
|
||||
while(true) {
|
||||
TSDBResultSet rs = sub.consume();
|
||||
int count = 0;
|
||||
while(rs.next()) {
|
||||
count++;
|
||||
}
|
||||
total += count;
|
||||
System.out.printf("%d rows consumed, total %d\n", count, total);
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
```
|
||||
|
||||
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
|
||||
|
||||
#### 关闭订阅
|
||||
|
||||
```java
|
||||
sub.close(true);
|
||||
```
|
||||
|
||||
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
|
||||
|
||||
|
||||
### 关闭资源
|
||||
|
||||
```java
|
||||
resultSet.close();
|
||||
stmt.close();
|
||||
conn.close();
|
||||
```
|
||||
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
|
||||
## 与连接池使用
|
||||
|
||||
**HikariCP**
|
||||
|
||||
* 引入相应 HikariCP maven 依赖:
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.zaxxer</groupId>
|
||||
<artifactId>HikariCP</artifactId>
|
||||
<version>3.4.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
```java
|
||||
public static void main(String[] args) throws SQLException {
|
||||
HikariConfig config = new HikariConfig();
|
||||
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
|
||||
config.setUsername("root");
|
||||
config.setPassword("taosdata");
|
||||
|
||||
config.setMinimumIdle(3); //minimum number of idle connection
|
||||
config.setMaximumPoolSize(10); //maximum number of connection in the pool
|
||||
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
|
||||
config.setIdleTimeout(60000); // max idle time for recycle idle connection
|
||||
config.setConnectionTestQuery("describe log.dn"); //validation query
|
||||
config.setValidationTimeout(3000); //validation query timeout
|
||||
|
||||
HikariDataSource ds = new HikariDataSource(config); //create datasource
|
||||
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
|
||||
//query or insert
|
||||
// ...
|
||||
|
||||
connection.close(); // put back to connection pool
|
||||
}
|
||||
```
|
||||
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
|
||||
> 更多 HikariCP 使用问题请查看[官方说明][5]
|
||||
|
||||
**Druid**
|
||||
|
||||
* 引入相应 Druid maven 依赖:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>druid</artifactId>
|
||||
<version>1.1.20</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
```java
|
||||
public static void main(String[] args) throws Exception {
|
||||
Properties properties = new Properties();
|
||||
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
|
||||
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
|
||||
properties.put("username","root");
|
||||
properties.put("password","taosdata");
|
||||
|
||||
properties.put("maxActive","10"); //maximum number of connection in the pool
|
||||
properties.put("initialSize","3");//initial number of connection
|
||||
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
|
||||
properties.put("minIdle","3");//minimum number of connection in the pool
|
||||
|
||||
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
|
||||
|
||||
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
|
||||
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
|
||||
|
||||
properties.put("validationQuery","describe log.dn"); //validation query
|
||||
properties.put("testWhileIdle","true"); // test connection while idle
|
||||
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
|
||||
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
|
||||
|
||||
//create druid datasource
|
||||
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
|
||||
//query or insert
|
||||
// ...
|
||||
|
||||
connection.close(); // put back to connection pool
|
||||
}
|
||||
```
|
||||
> 更多 druid 使用问题请查看[官方说明][6]
|
||||
|
||||
**注意事项**
|
||||
* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
|
||||
|
||||
如下所示,`select server_status()` 执行成功会返回 `1`。
|
||||
```shell
|
||||
taos> select server_status();
|
||||
server_status()|
|
||||
================
|
||||
1 |
|
||||
Query OK, 1 row(s) in set (0.000141s)
|
||||
```
|
||||
|
||||
## 与框架使用
|
||||
|
||||
* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
|
||||
* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
|
||||
|
||||
## 常见问题
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
|
||||
**原因**:程序没有找到依赖的本地函数库 taos。
|
||||
|
||||
**解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
**原因**:目前 TDengine 只支持 64 位 JDK。
|
||||
|
||||
**解决方法**:重新安装 64 位 JDK。
|
||||
|
||||
* 其它问题请参考 [Issues][7]
|
||||
|
||||
## Python Connector
|
||||
|
||||
### 安装准备
|
||||
* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
|
||||
* 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)][4]
|
||||
* 已安装python 2.7 or >= 3.4
|
||||
* 已安装pip
|
||||
|
||||
|
@ -1137,18 +782,5 @@ promise2.then(function(result) {
|
|||
[这里](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)同样是一个使用NodeJS 连接器建表,插入天气数据并查询插入的数据的代码示例,但和上面不同的是,该示例只使用`cursor`.
|
||||
|
||||
|
||||
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[3]: https://github.com/taosdata/TDengine
|
||||
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
|
||||
[5]: https://github.com/brettwooldridge/HikariCP
|
||||
[6]: https://github.com/alibaba/druid
|
||||
[7]: https://github.com/taosdata/TDengine/issues
|
||||
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[10]: https://maven.aliyun.com/mvn/search
|
||||
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
|
||||
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
|
||||
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
|
||||
[14]: https://www.taosdata.com/cn/documentation20/connector/#Windows
|
||||
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
|
||||
[4]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
|
||||
|
||||
|
|
|
@ -0,0 +1,370 @@
|
|||
# Java Connector
|
||||
|
||||
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
|
||||
|
||||
由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
|
||||
|
||||
* libtaos.so
|
||||
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
|
||||
|
||||
* taos.dll
|
||||
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
|
||||
|
||||
> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
|
||||
|
||||
TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
|
||||
|
||||
* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
|
||||
* 由于不支持删除和修改,所以也不支持事务操作。
|
||||
* 目前不支持表间的 union 操作。
|
||||
* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
|
||||
|
||||
|
||||
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
|
||||
|
||||
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
|
||||
| --- | --- | --- |
|
||||
| 2.0.4 | 2.0.0.x 及以上 | 1.8.x |
|
||||
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
|
||||
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
|
||||
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
|
||||
|
||||
## TDengine DataType 和 Java DataType
|
||||
|
||||
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
|
||||
|
||||
| TDengine DataType | Java DataType |
|
||||
| --- | --- |
|
||||
| TIMESTAMP | java.sql.Timestamp |
|
||||
| INT | java.lang.Integer |
|
||||
| BIGINT | java.lang.Long |
|
||||
| FLOAT | java.lang.Float |
|
||||
| DOUBLE | java.lang.Double |
|
||||
| SMALLINT, TINYINT |java.lang.Short |
|
||||
| BOOL | java.lang.Boolean |
|
||||
| BINARY, NCHAR | java.lang.String |
|
||||
|
||||
## 如何获取 TAOS-JDBCDriver
|
||||
|
||||
### maven 仓库
|
||||
|
||||
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
|
||||
* [sonatype][8]
|
||||
* [mvnrepository][9]
|
||||
* [maven.aliyun][10]
|
||||
|
||||
maven 项目中使用如下 pom.xml 配置即可:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.4</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
### 源码编译打包
|
||||
|
||||
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
|
||||
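A hedged command-line sketch of those steps (assuming the repository was cloned as in the README build section):

```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine/src/connector/jdbc
mvn clean package   # the taos-jdbcdriver jar is produced under target/
```
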
|
||||
|
||||
## 使用说明
|
||||
|
||||
### 获取连接
|
||||
|
||||
如下所示配置即可获取 TDengine Connection:
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl);
|
||||
```
|
||||
> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。
|
||||
|
||||
TDengine 的 JDBC URL 规范格式为:
|
||||
`jdbc:TAOS://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
|
||||
|
||||
其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下:
|
||||
|
||||
* user:登录 TDengine 用户名,默认值 root。
|
||||
* password:用户登录密码,默认值 taosdata。
|
||||
* charset:客户端使用的字符集,默认值为系统字符集。
|
||||
* cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。
|
||||
* locale:客户端语言环境,默认值系统当前 locale。
|
||||
* timezone:客户端使用的时区,默认值为系统当前时区。
|
||||
|
||||
以上参数可以在 3 处配置,`优先级由高到低`分别如下:
|
||||
1. JDBC URL 参数
|
||||
如上所述,可以在 JDBC URL 的参数中指定。
|
||||
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
|
||||
```java
|
||||
public Connection getConn() throws Exception{
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
```
|
||||
|
||||
3. 客户端配置文件 taos.cfg
|
||||
|
||||
linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。
|
||||
```properties
|
||||
# client default username
|
||||
# defaultUser root
|
||||
|
||||
# client default password
|
||||
# defaultPass taosdata
|
||||
|
||||
# default system charset
|
||||
# charset UTF-8
|
||||
|
||||
# system locale
|
||||
# locale en_US.UTF-8
|
||||
```
|
||||
> 更多详细配置请参考[客户端配置][13]
|
||||
|
||||
### 创建数据库和表
|
||||
|
||||
```java
|
||||
Statement stmt = conn.createStatement();
|
||||
|
||||
// create database
|
||||
stmt.executeUpdate("create database if not exists db");
|
||||
|
||||
// use database
|
||||
stmt.executeUpdate("use db");
|
||||
|
||||
// create table
|
||||
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
|
||||
```
|
||||
> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
|
||||
|
||||
### 插入数据
|
||||
|
||||
```java
|
||||
// insert data
|
||||
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
|
||||
|
||||
System.out.println("insert " + affectedRows + " rows.");
|
||||
```
|
||||
> now 为系统内部函数,默认为服务器当前时间。
|
||||
> `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。
|
||||
|
||||
### 查询数据
|
||||
|
||||
```java
|
||||
// query data
|
||||
ResultSet resultSet = stmt.executeQuery("select * from tb");
|
||||
|
||||
Timestamp ts = null;
|
||||
int temperature = 0;
|
||||
float humidity = 0;
|
||||
while(resultSet.next()){
|
||||
|
||||
ts = resultSet.getTimestamp(1);
|
||||
temperature = resultSet.getInt(2);
|
||||
humidity = resultSet.getFloat("humidity");
|
||||
|
||||
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
|
||||
}
|
||||
```
|
||||
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
|
||||
|
||||
|
||||
### 订阅
|
||||
|
||||
#### 创建
|
||||
|
||||
```java
|
||||
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
|
||||
```
|
||||
|
||||
`subscribe` 方法的三个参数含义如下:
|
||||
|
||||
* topic:订阅的主题(即名称),此参数是订阅的唯一标识
|
||||
* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
|
||||
* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
|
||||
|
||||
如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
|
||||
|
||||
#### 消费数据
|
||||
|
||||
```java
|
||||
int total = 0;
|
||||
while(true) {
|
||||
TSDBResultSet rs = sub.consume();
|
||||
int count = 0;
|
||||
while(rs.next()) {
|
||||
count++;
|
||||
}
|
||||
total += count;
|
||||
System.out.printf("%d rows consumed, total %d\n", count, total);
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
```
|
||||
|
||||
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
|
||||
|
||||
#### 关闭订阅
|
||||
|
||||
```java
|
||||
sub.close(true);
|
||||
```
|
||||
|
||||
`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
|
||||
|
||||
|
||||
### 关闭资源
|
||||
|
||||
```java
|
||||
resultSet.close();
|
||||
stmt.close();
|
||||
conn.close();
|
||||
```
|
||||
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
|
||||
## 与连接池使用
|
||||
|
||||
**HikariCP**
|
||||
|
||||
* 引入相应 HikariCP maven 依赖:
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.zaxxer</groupId>
|
||||
<artifactId>HikariCP</artifactId>
|
||||
<version>3.4.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
```java
|
||||
public static void main(String[] args) throws SQLException {
|
||||
HikariConfig config = new HikariConfig();
|
||||
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
|
||||
config.setUsername("root");
|
||||
config.setPassword("taosdata");
|
||||
|
||||
config.setMinimumIdle(3); //minimum number of idle connection
|
||||
config.setMaximumPoolSize(10); //maximum number of connection in the pool
|
||||
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
|
||||
config.setIdleTimeout(60000); // max idle time for recycle idle connection
|
||||
config.setConnectionTestQuery("describe log.dn"); //validation query
|
||||
config.setValidationTimeout(3000); //validation query timeout
|
||||
|
||||
HikariDataSource ds = new HikariDataSource(config); //create datasource
|
||||
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
|
||||
//query or insert
|
||||
// ...
|
||||
|
||||
connection.close(); // put back to connection pool
|
||||
}
|
||||
```
|
||||
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
|
||||
> 更多 HikariCP 使用问题请查看[官方说明][5]
|
||||
|
||||
**Druid**
|
||||
|
||||
* 引入相应 Druid maven 依赖:
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>druid</artifactId>
|
||||
<version>1.1.20</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
* 使用示例如下:
|
||||
```java
|
||||
public static void main(String[] args) throws Exception {
|
||||
Properties properties = new Properties();
|
||||
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
|
||||
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
|
||||
properties.put("username","root");
|
||||
properties.put("password","taosdata");
|
||||
|
||||
properties.put("maxActive","10"); //maximum number of connection in the pool
|
||||
properties.put("initialSize","3");//initial number of connection
|
||||
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
|
||||
properties.put("minIdle","3");//minimum number of connection in the pool
|
||||
|
||||
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
|
||||
|
||||
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
|
||||
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
|
||||
|
||||
properties.put("validationQuery","describe log.dn"); //validation query
|
||||
properties.put("testWhileIdle","true"); // test connection while idle
|
||||
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
|
||||
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
|
||||
|
||||
//create druid datasource
|
||||
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
|
||||
Connection connection = ds.getConnection(); // get connection
|
||||
Statement statement = connection.createStatement(); // get statement
|
||||
|
||||
//query or insert
|
||||
// ...
|
||||
|
||||
connection.close(); // put back to connection pool
|
||||
}
|
||||
```
|
||||
> 更多 druid 使用问题请查看[官方说明][6]
|
||||
|
||||
**注意事项**
|
||||
* TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
|
||||
|
||||
如下所示,`select server_status()` 执行成功会返回 `1`。
|
||||
```shell
|
||||
taos> select server_status();
|
||||
server_status()|
|
||||
================
|
||||
1 |
|
||||
Query OK, 1 row(s) in set (0.000141s)
|
||||
```
|
||||
|
||||
## 与框架使用
|
||||
|
||||
* Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11]
|
||||
* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
|
||||
|
||||
## 常见问题
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
|
||||
**原因**:程序没有找到依赖的本地函数库 taos。
|
||||
|
||||
**解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
|
||||
|
||||
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
**原因**:目前 TDengine 只支持 64 位 JDK。
|
||||
|
||||
**解决方法**:重新安装 64 位 JDK。
|
||||
|
||||
* 其它问题请参考 [Issues][7]
|
||||
|
||||
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[3]: https://github.com/taosdata/TDengine
|
||||
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
|
||||
[5]: https://github.com/brettwooldridge/HikariCP
|
||||
[6]: https://github.com/alibaba/druid
|
||||
[7]: https://github.com/taosdata/TDengine/issues
|
||||
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
|
||||
[10]: https://maven.aliyun.com/mvn/search
|
||||
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
|
||||
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
|
||||
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
|
||||
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
|
||||
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B
|
|
@ -23,18 +23,88 @@
|
|||
|
||||
客户端遇到链接故障,请按照下面的步骤进行检查:
|
||||
|
||||
1. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
|
||||
2. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
3. 确认客户端连接时指定了正确的服务器FQDN(Fully Qualified Domain Name,可在服务器上执行Linux命令 hostname -f 获得)
|
||||
4. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
|
||||
5. 检查防火墙设置,确认TCP/UDP 端口6030-6039 是打开的
|
||||
6. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
7. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
|
||||
8. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
|
||||
1. 检查网络环境
|
||||
* 云服务器:检查云服务器的安全组是否打开TCP/UDP 端口6030-6042的访问权限
|
||||
* 本地虚拟机:检查网络能否ping通,尽量避免使用`localhost` 作为hostname
|
||||
* 公司服务器:如果为NAT网络环境,请务必检查服务器能否将消息返回至客户端
|
||||
|
||||
2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
|
||||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器FQDN(Fully Qualified Domain Name,可在服务器上执行Linux命令 hostname -f 获得)
|
||||
|
||||
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
|
||||
|
||||
6. 检查防火墙设置,确认TCP/UDP 端口6030-6042 是打开的
|
||||
|
||||
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/lib/taos*里, 并且*/usr/local/lib/taos*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*driver/c/taos.dll*在你的系统搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
|
||||
|
||||
9. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅
|
||||
检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} `
|
||||
检查服务器侧TCP端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
10. 可以使用taos程序内嵌的网络连通检测功能:验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP)。
|
||||
|
||||
taos通过参数 -n 来确定运行服务端功能,还是客户端功能。-n server:表示运行检测服务端功能;-n client:表示运行检测客户端功能。
|
||||
|
||||
1)首先在服务器上停止taosd服务;
|
||||
|
||||
2)在服务器上运行taos内嵌的网络连通检测的服务端功能:taos -n server -P 6030 -e 6042 -l 1000;
|
||||
|
||||
3)在客户端运行taos内嵌的网络连通检测的客户端功能:taos -n client -h host -P 6030 -e 6042 -l 1000;
|
||||
|
||||
-n :指示运行网络连通检测的服务端功能,或客户端功能,缺省值为空,表示不启动网络连通检测;
|
||||
|
||||
-h:指示服务端名称,可以是ip地址或fqdn格式。如:192.168.1.160,或 192.168.1.160:6030,或 hostname1,或hostname1:6030。缺省值是127.0.0.1。
|
||||
|
||||
-P :检测的起始端口号,缺省值是6030;
|
||||
|
||||
-e:检测的结束端口号,必须大于等于起始端口号,缺省值是6042;
|
||||
|
||||
-l:指定检测端口连通的报文长度,最大64000字节,缺省值是1000字节,测试时服务端和客户端必须指定相同;
|
||||
|
||||
服务端设置的起始端口和结束端口号,必须包含客户端设置的起始端口和结束端口号;
|
||||
|
||||
对于起始端口号有三种设置方式:缺省值、-h指定、-P指定,优先级是:-P指定 > -h指定 > 缺省值。
|
||||
|
||||
客户端运行的输出样例:
|
||||
|
||||
`sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6`
`host: ubuntu-vbox6 start port: 6030 end port: 6042 packet len: 1000`
`tcp port:6030 test ok. udp port:6030 test ok.`
`tcp port:6031 test ok. udp port:6031 test ok.`
`tcp port:6032 test ok. udp port:6032 test ok.`
`tcp port:6033 test ok. udp port:6033 test ok.`
`tcp port:6034 test ok. udp port:6034 test ok.`
`tcp port:6035 test ok. udp port:6035 test ok.`
`tcp port:6036 test ok. udp port:6036 test ok.`
`tcp port:6037 test ok. udp port:6037 test ok.`
`tcp port:6038 test ok. udp port:6038 test ok.`
`tcp port:6039 test ok. udp port:6039 test ok.`
`tcp port:6040 test ok. udp port:6040 test ok.`
`tcp port:6041 test ok. udp port:6041 test ok.`
`tcp port:6042 test ok. udp port:6042 test ok.`
|
||||
|
||||
如果某个端口不通,会输出 `port:xxxx test fail`的信息。
|
||||
|
||||
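A consolidated sketch of the port checks in steps 9 and 10 above (the hostname and port range are placeholders; run the server-side commands on the TDengine host and the client-side commands on the application host):

```bash
# step 9: raw checks with nc
nc -vuz server-fqdn 6030     # client side, UDP check of one port
nc -l 6030                   # server side, open a TCP listener
nc server-fqdn 6030          # client side, TCP check of one port

# step 10: built-in network test (stop taosd on the server first)
taos -n server -P 6030 -e 6042 -l 1000                   # run on the server
taos -n client -h server-fqdn -P 6030 -e 6042 -l 1000    # run on the client
```
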
|
||||
## 6. 遇到错误“Unexpected generic error in RPC”, 我怎么办?
|
||||
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
|
||||
|
|
|
@ -0,0 +1,88 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Modified from original source: Elastic Search
|
||||
# https://github.com/elasticsearch/elasticsearch
|
||||
# Thank you to the Elastic Search authors
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: PowerDB
|
||||
# Required-Start: $local_fs $network $syslog
|
||||
# Required-Stop: $local_fs $network $syslog
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Starts PowerDB powerd
|
||||
# Description: Starts PowerDB powerd, a time-series database engine
|
||||
### END INIT INFO
|
||||
|
||||
set -e
|
||||
|
||||
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
|
||||
NAME="PowerDB"
|
||||
USER="root"
|
||||
GROUP="root"
|
||||
DAEMON="/usr/local/power/bin/powerd"
|
||||
DAEMON_OPTS=""
|
||||
PID_FILE="/var/run/$NAME.pid"
|
||||
APPARGS=""
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
|
||||
log_action_begin_msg "Starting PowerDB..."
|
||||
if start-stop-daemon --test --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS &> /dev/null; then
|
||||
|
||||
touch "$PID_FILE" && chown "$USER":"$GROUP" "$PID_FILE"
|
||||
|
||||
if [ -n "$MAX_OPEN_FILES" ]; then
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
fi
|
||||
|
||||
start-stop-daemon --start --chuid "$USER:$GROUP" --background --make-pidfile --pidfile "$PID_FILE" --exec "$DAEMON" -- $APPARGS
|
||||
|
||||
log_end_msg $?
|
||||
fi
|
||||
;;
|
||||
|
||||
stop)
|
||||
log_action_begin_msg "Stopping PowerDB..."
|
||||
set +e
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
start-stop-daemon --stop --pidfile "$PID_FILE" --user "$USER" --retry=TERM/120/KILL/5 > /dev/null
|
||||
if [ $? -eq 1 ]; then
|
||||
log_action_cont_msg "TSD is not running but pid file exists, cleaning up"
|
||||
elif [ $? -eq 3 ]; then
|
||||
PID="`cat $PID_FILE`"
|
||||
log_failure_msg "Failed to stop PowerDB (pid $PID)"
|
||||
exit 1
|
||||
fi
|
||||
rm -f "$PID_FILE"
|
||||
else
|
||||
log_action_cont_msg "PowerDB was not running"
|
||||
fi
|
||||
log_action_end_msg 0
|
||||
set -e
|
||||
;;
|
||||
|
||||
restart|force-reload)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
$0 stop
|
||||
sleep 1
|
||||
fi
|
||||
$0 start
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
|
||||
;;
|
||||
*)
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -11,6 +11,7 @@ set -e
|
|||
# -V [stable | beta]
|
||||
# -l [full | lite]
|
||||
# -s [static | dynamic]
|
||||
# -d [taos | power]
|
||||
# -n [2.0.0.3]
|
||||
# -m [2.0.0.0]
|
||||
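For illustration, a hypothetical invocation of this script combining the flags documented in the comments above and in the help text further down (all values are examples only):

```bash
./release.sh -V stable -l full -s static -d power -n 2.0.0.3 -m 2.0.0.0
```
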
|
||||
|
@ -21,10 +22,11 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
|
|||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
pagMode=full # [full | lite]
|
||||
soMode=dynamic # [static | dynamic]
|
||||
dbName=taos # [taos | power]
|
||||
verNumber=""
|
||||
verNumberComp="2.0.0.0"
|
||||
|
||||
while getopts "hv:V:c:o:l:s:n:m:" arg
|
||||
while getopts "hv:V:c:o:l:s:d:n:m:" arg
|
||||
do
|
||||
case $arg in
|
||||
v)
|
||||
|
@ -47,6 +49,10 @@ do
|
|||
#echo "soMode=$OPTARG"
|
||||
soMode=$(echo $OPTARG)
|
||||
;;
|
||||
d)
|
||||
#echo "dbName=$OPTARG"
|
||||
dbName=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
|
@ -66,6 +72,7 @@ do
|
|||
echo " -V [stable | beta] "
|
||||
echo " -l [full | lite] "
|
||||
echo " -s [static | dynamic] "
|
||||
echo " -d [taos | power] "
|
||||
echo " -n [version number] "
|
||||
echo " -m [compatible version number] "
|
||||
exit 0
|
||||
|
@ -77,7 +84,7 @@ do
|
|||
esac
|
||||
done
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} verNumber=${verNumber} verNumberComp=${verNumberComp}"
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
|
||||
|
@ -170,9 +177,9 @@ cd ${compile_dir}
|
|||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
|
||||
if [ "$verMode" != "cluster" ]; then
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode}
|
||||
else
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp}
|
||||
fi
|
||||
else
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
|
@ -185,7 +192,7 @@ cd ${curr_dir}
|
|||
|
||||
# 3. Call the corresponding script for packaging
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
|
@ -208,11 +215,17 @@ if [ "$osType" != "Darwin" ]; then
|
|||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
|
||||
if [[ "$dbName" == "taos" ]]; then
|
||||
${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
|
||||
${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
|
||||
${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
|
||||
else
|
||||
${csudo} ./makepkg_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
${csudo} ./makeclient_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
${csudo} ./makearbi_power.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
|
||||
fi
|
||||
else
|
||||
cd ${script_dir}/tools
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${dbName}
|
||||
fi
|
||||
|
||||
|
|
|
@ -0,0 +1,145 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# power This shell script takes care of starting and stopping PowerDB.
|
||||
#
|
||||
# chkconfig: 2345 99 01
|
||||
# description: PowerDB is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, PowerDB also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
#
|
||||
#
|
||||
### BEGIN INIT INFO
|
||||
# Provides: powerd
|
||||
# Required-Start: $network $local_fs $remote_fs
|
||||
# Required-Stop: $network $local_fs $remote_fs
|
||||
# Short-Description: start and stop powerd
|
||||
# Description: PowerDB is a distributed, scalable, high-performance Time Series Database
|
||||
# (TSDB). More than just a pure database, PowerDB also provides the ability
|
||||
# to do stream computing, aggregation etc.
|
||||
### END INIT INFO
|
||||
|
||||
# Source init functions
|
||||
. /etc/init.d/functions
|
||||
|
||||
# Maximum number of open files
|
||||
MAX_OPEN_FILES=65535
|
||||
|
||||
# Default program options
|
||||
NAME=powerd
|
||||
PROG=/usr/local/power/bin/powerd
|
||||
USER=root
|
||||
GROUP=root
|
||||
|
||||
# Default directories
|
||||
LOCK_DIR=/var/lock/subsys
|
||||
PID_DIR=/var/run/$NAME
|
||||
|
||||
# Set file names
|
||||
LOCK_FILE=$LOCK_DIR/$NAME
|
||||
PID_FILE=$PID_DIR/$NAME.pid
|
||||
|
||||
[ -e $PID_DIR ] || mkdir -p $PID_DIR
|
||||
|
||||
PROG_OPTS=""
|
||||
|
||||
start() {
|
||||
echo -n "Starting ${NAME}: "
|
||||
# check identity
|
||||
curid="`id -u -n`"
|
||||
if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
|
||||
echo "Must be run as root or $USER, but was run as $curid"
|
||||
return 1
|
||||
fi
|
||||
# Sets the maximum number of open file descriptors allowed.
|
||||
ulimit -n $MAX_OPEN_FILES
|
||||
curulimit="`ulimit -n`"
|
||||
if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
|
||||
echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "`id -u -n`" == root ] ; then
|
||||
# Changes the owner of the lock, and the pid files to allow
|
||||
# non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
|
||||
touch $LOCK_FILE && chown $USER:$GROUP $LOCK_FILE
|
||||
touch $PID_FILE && chown $USER:$GROUP $PID_FILE
|
||||
daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
else
|
||||
# Don't have to change user.
|
||||
daemon --pidfile $PID_FILE "$PROG $PROG_OPTS &> /dev/null &"
|
||||
fi
|
||||
retval=$?
|
||||
sleep 2
|
||||
echo
|
||||
[ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
stop() {
|
||||
echo -n "Stopping ${NAME}: "
|
||||
killproc -p $PID_FILE $NAME
|
||||
retval=$?
|
||||
echo
|
||||
# Non-root users don't have enough permission to remove pid and lock files.
|
||||
# So, the opentsdb_restart.py cannot get rid of the files, and the command
|
||||
# "service opentsdb status" will complain about the existing pid file.
|
||||
# Makes the pid file empty.
|
||||
echo > $PID_FILE
|
||||
[ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
|
||||
return $retval
|
||||
}
|
||||
|
||||
restart() {
|
||||
stop
|
||||
start
|
||||
}
|
||||
|
||||
reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
force_reload() {
|
||||
restart
|
||||
}
|
||||
|
||||
rh_status() {
|
||||
# run checks to determine if the service is running or use generic status
|
||||
status -p $PID_FILE -l $LOCK_FILE $NAME
|
||||
}
|
||||
|
||||
rh_status_q() {
|
||||
rh_status >/dev/null 2>&1
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
rh_status_q && exit 0
|
||||
$1
|
||||
;;
|
||||
stop)
|
||||
rh_status_q || exit 0
|
||||
$1
|
||||
;;
|
||||
restart)
|
||||
$1
|
||||
;;
|
||||
reload)
|
||||
rh_status_q || exit 7
|
||||
$1
|
||||
;;
|
||||
force-reload)
|
||||
force_reload
|
||||
;;
|
||||
status)
|
||||
rh_status
|
||||
;;
|
||||
condrestart|try-restart)
|
||||
rh_status_q || exit 0
|
||||
restart
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
||||
exit 2
|
||||
esac
|
||||
|
||||
exit $?
|
|
@ -0,0 +1,297 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/tarbitrator/bin"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu" ; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian" ; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin" ; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos" ; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora" ; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
|
||||
echo " osinfo: ${osinfo}"
|
||||
echo " This is an officially unverified linux system,"
|
||||
echo " if there are any problems with the installation and operation, "
|
||||
echo " please feel free to contact taosdata.com for support."
|
||||
os_type=1
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo} rm -rf ${install_main_dir} || :
|
||||
${csudo} mkdir -p ${install_main_dir}
|
||||
${csudo} mkdir -p ${install_main_dir}/bin
|
||||
#${csudo} mkdir -p ${install_main_dir}/include
|
||||
${csudo} mkdir -p ${install_main_dir}/init.d
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${bin_link_dir}/rmtarbitrator || :
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/remove_arbi_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_arbi_power.sh ${bin_link_dir}/rmtarbitrator || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
|
||||
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo} init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
sleep 1
|
||||
|
||||
# Install tarbitratord service
|
||||
|
||||
if ((${os_type}==1)); then
|
||||
${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
|
||||
elif ((${os_type}==2)); then
|
||||
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
|
||||
fi
|
||||
|
||||
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
|
||||
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
${csudo} chkconfig --add tarbitratord || :
|
||||
${csudo} chkconfig --level 2345 tarbitratord on || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
${csudo} insserv tarbitratord || :
|
||||
${csudo} insserv -d tarbitratord || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
${csudo} update-rc.d tarbitratord defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
if systemctl is-active --quiet tarbitratord; then
|
||||
echo "tarbitrator is running, stopping it..."
|
||||
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
# power:2345:respawn:/etc/init.d/tarbitratord start
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
|
||||
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Description=PowerDB arbitrator service' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
|
||||
${csudo} systemctl enable tarbitratord
|
||||
}
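
For readability, the echo commands above assemble the following unit file at ${service_config_dir}/tarbitratord.service (reproduced here only as a reference, derived line by line from the commands):

```
[Unit]
Description=PowerDB arbitrator service
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/tarbitrator
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
```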
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
# must manually stop tarbitrator
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
function update_PowerDB() {
|
||||
# Start to update
|
||||
echo -e "${GREEN}Start to update PowerDB's arbitrator ...${NC}"
|
||||
# Stop the service if running
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
if ((${service_mod}==0)); then
|
||||
${csudo} systemctl stop tarbitratord || :
|
||||
elif ((${service_mod}==1)); then
|
||||
${csudo} service tarbitratord stop || :
|
||||
else
|
||||
kill_tarbitrator
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
|
||||
echo
|
||||
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
|
||||
if ((${service_mod}==0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod}==1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ./tarbitrator${NC}"
|
||||
fi
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB's arbitrator is updated successfully!${NC}"
|
||||
}
|
||||
|
||||
function install_PowerDB() {
|
||||
# Start to install
|
||||
echo -e "${GREEN}Start to install PowerDB's arbitrator ...${NC}"
|
||||
|
||||
install_main_path
|
||||
#install_header
|
||||
install_bin
|
||||
install_service
|
||||
echo
|
||||
#echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/taos/taos.cfg"
|
||||
if ((${service_mod}==0)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} systemctl start tarbitratord${NC}"
|
||||
elif ((${service_mod}==1)); then
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: ${csudo} service tarbitratord start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start arbitrator ${NC}: tarbitrator${NC}"
|
||||
fi
|
||||
|
||||
echo -e "\033[44;32;1mPowerDB's arbitrator is installed successfully!${NC}"
|
||||
echo
|
||||
}
|
||||

## ==============================Main program starts from here============================
# Install or update the arbitrator
if [ -x ${bin_dir}/tarbitrator ]; then
    update_flag=1
    update_PowerDB
else
    install_PowerDB
fi
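
A brief, hedged usage note: when service_mod is 0 the installer above registers and enables tarbitratord with systemd, so the result can be verified with standard commands:

```bash
sudo systemctl start tarbitratord           # start the arbitrator service created above
sudo systemctl status tarbitratord --no-pager
sudo journalctl -u tarbitratord -n 20       # recent output, if any (the unit sets StandardOutput=null)
```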
|
||||
|
|
@ -85,7 +85,7 @@ function install_bin() {
    # Remove links
    ${csudo} rm -f ${bin_link_dir}/taos || :
    if [ "$osType" == "Darwin" ]; then
        ${csudo} rm -f ${bin_link_dir}/taosdump || :
        ${csudo} rm -f ${bin_link_dir}/taosdemo || :
    fi
    ${csudo} rm -f ${bin_link_dir}/rmtaos || :
    ${csudo} rm -f ${bin_link_dir}/set_core || :

@ -95,7 +95,7 @@ function install_bin() {
    #Make link
    [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
    if [ "$osType" == "Darwin" ]; then
        [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
        [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
    fi
    [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
    [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
@ -0,0 +1,249 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install PowerDB client on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
|
||||
osType=Linux
|
||||
pagMode=full
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/power"
|
||||
log_dir="/var/log/power"
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
data_dir="/var/lib/power"
|
||||
log_dir="~/PowerDBLog"
|
||||
fi
|
||||
|
||||
log_link_dir="/usr/local/power/log"
|
||||
|
||||
cfg_install_dir="/etc/power"
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
else
|
||||
bin_link_dir="/usr/local/bin"
|
||||
lib_link_dir="/usr/local/lib"
|
||||
inc_link_dir="/usr/local/include"
|
||||
fi
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/power"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/power/bin"
|
||||
|
||||
# v1.5 jar dir
|
||||
#v15_java_app_dir="/usr/local/lib/power"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -ef | grep "power" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo} rm -rf ${install_main_dir} || :
|
||||
${csudo} mkdir -p ${install_main_dir}
|
||||
${csudo} mkdir -p ${install_main_dir}/cfg
|
||||
${csudo} mkdir -p ${install_main_dir}/bin
|
||||
${csudo} mkdir -p ${install_main_dir}/connector
|
||||
${csudo} mkdir -p ${install_main_dir}/driver
|
||||
${csudo} mkdir -p ${install_main_dir}/examples
|
||||
${csudo} mkdir -p ${install_main_dir}/include
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${bin_link_dir}/power || :
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
${csudo} rm -f ${bin_link_dir}/powerdemo || :
|
||||
fi
|
||||
${csudo} rm -f ${bin_link_dir}/rmpower || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
|
||||
fi
|
||||
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
sudo rm -f /usr/lib/libtaos.* || :
|
||||
sudo rm -rf ${lib_dir} || :
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo} rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [ -d "${lib64_link_dir}" ]; then
|
||||
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
else
|
||||
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
|
||||
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
|
||||
fi
|
||||
|
||||
${csudo} ldconfig
|
||||
}
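
As a quick post-install check (a verification sketch, not part of the installer): on Linux the links created by install_lib above can be inspected directly:

```bash
ls -l /usr/lib/libtaos.so /usr/lib/libtaos.so.1    # both should resolve into /usr/local/power/driver
ldconfig -p | grep libtaos                         # the loader cache should list the library after ldconfig
```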
|
||||
|
||||
function install_header() {
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
|
||||
|
||||
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo} mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
|
||||
${csudo} chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
|
||||
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
|
||||
}
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo} rm -rf ${log_dir} || :
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
else
|
||||
mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
fi
|
||||
${csudo} ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function update_PowerDB() {
|
||||
# Start to update
|
||||
if [ ! -e power.tar.gz ]; then
|
||||
echo "File power.tar.gz does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf power.tar.gz
|
||||
|
||||
echo -e "${GREEN}Start to update PowerDB client...${NC}"
|
||||
# Stop the client shell if running
|
||||
if pidof power &> /dev/null; then
|
||||
kill_client
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
if [ "$pagMode" != "lite" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf power.tar.gz)
|
||||
}
|
||||
|
||||
function install_PowerDB() {
|
||||
# Start to install
|
||||
if [ ! -e power.tar.gz ]; then
|
||||
echo "File power.tar.gz does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf power.tar.gz
|
||||
|
||||
echo -e "${GREEN}Start to install PowerDB client...${NC}"
|
||||
|
||||
install_main_path
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
if [ "$pagMode" != "lite" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
|
||||
|
||||
rm -rf $(tar -tf power.tar.gz)
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
# Install or update the client
# If the server is already installed, there is no need to install the client separately
if [ -e ${bin_dir}/powerd ]; then
    echo -e "\033[44;32;1mThe PowerDB server is already installed; the client does not need to be installed separately!${NC}"
    exit 0
fi

if [ -x ${bin_dir}/power ]; then
    update_flag=1
    update_PowerDB
else
    install_PowerDB
fi
|
|
@ -0,0 +1,733 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This file is used to install database on linux systems. The operating system
|
||||
# is required to use systemd to manage services at boot
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
pagMode=full
|
||||
|
||||
# -----------------------Variables definition---------------------
|
||||
script_dir=$(dirname $(readlink -f "$0"))
|
||||
# Dynamic directory
|
||||
data_dir="/var/lib/power"
|
||||
log_dir="/var/log/power"
|
||||
|
||||
data_link_dir="/usr/local/power/data"
|
||||
log_link_dir="/usr/local/power/log"
|
||||
|
||||
cfg_install_dir="/etc/power"
|
||||
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/power"
|
||||
|
||||
# old bin dir
|
||||
bin_dir="/usr/local/power/bin"
|
||||
|
||||
# v1.5 jar dir
|
||||
#v15_java_app_dir="/usr/local/lib/power"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
nginx_port=6060
|
||||
nginx_dir="/usr/local/nginxd"
|
||||
|
||||
# Color setting
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
GREEN_DARK='\033[0;32m'
|
||||
GREEN_UNDERLINE='\033[4;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
update_flag=0
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
|
||||
# get the operating system type for using the corresponding init file
|
||||
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
|
||||
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) ||:
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
#echo "osinfo: ${osinfo}"
|
||||
os_type=0
|
||||
if echo $osinfo | grep -qwi "ubuntu" ; then
|
||||
# echo "This is ubuntu system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "debian" ; then
|
||||
# echo "This is debian system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "Kylin" ; then
|
||||
# echo "This is Kylin system"
|
||||
os_type=1
|
||||
elif echo $osinfo | grep -qwi "centos" ; then
|
||||
# echo "This is centos system"
|
||||
os_type=2
|
||||
elif echo $osinfo | grep -qwi "fedora" ; then
|
||||
# echo "This is fedora system"
|
||||
os_type=2
|
||||
else
    echo " osinfo: ${osinfo}"
    echo " This is an officially unverified Linux distribution;"
    echo " if there are any problems with installation or operation,"
    echo " please feel free to contact taosdata.com for support."
    os_type=1
fi
|
||||
|
||||
|
||||
# ============================= get input parameters =================================================
|
||||
|
||||
# install.sh -v [server | client] -e [yes | no] -i [systemd | service | ...]
|
||||
|
||||
# set parameters by default value
|
||||
interactiveFqdn=yes # [yes | no]
|
||||
verType=server # [server | client]
|
||||
initType=systemd # [systemd | service | ...]
|
||||
|
||||
while getopts "hv:e:i:" arg
|
||||
do
|
||||
case $arg in
|
||||
e)
|
||||
#echo "interactiveFqdn=$OPTARG"
|
||||
interactiveFqdn=$( echo $OPTARG )
|
||||
;;
|
||||
v)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
i)
|
||||
#echo "initType=$OPTARG"
|
||||
initType=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: `basename $0` -v [server | client] -e [yes | no]"
|
||||
exit 0
|
||||
;;
|
||||
?) #unknown option
echo "unknown argument"
exit 1
;;
|
||||
esac
|
||||
done
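
For illustration, a few invocations that exercise the options parsed above (the script name install_power.sh matches the install_files entry in the packaging scripts later in this change; all values are examples):

```bash
./install_power.sh                     # server install with defaults (interactive firstEp prompt)
./install_power.sh -v client           # client-only install
./install_power.sh -v server -e no     # server install, skip the FQDN/firstEp prompt
```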
|
||||
|
||||
#echo "verType=${verType} interactiveFqdn=${interactiveFqdn}"
|
||||
|
||||
function kill_powerd() {
|
||||
pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_main_path() {
|
||||
#create install main dir and all sub dir
|
||||
${csudo} rm -rf ${install_main_dir} || :
|
||||
${csudo} mkdir -p ${install_main_dir}
|
||||
${csudo} mkdir -p ${install_main_dir}/cfg
|
||||
${csudo} mkdir -p ${install_main_dir}/bin
|
||||
${csudo} mkdir -p ${install_main_dir}/connector
|
||||
${csudo} mkdir -p ${install_main_dir}/driver
|
||||
${csudo} mkdir -p ${install_main_dir}/examples
|
||||
${csudo} mkdir -p ${install_main_dir}/include
|
||||
${csudo} mkdir -p ${install_main_dir}/init.d
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo} mkdir -p ${nginx_dir}
|
||||
fi
|
||||
}
|
||||
|
||||
function install_bin() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${bin_link_dir}/power || :
|
||||
${csudo} rm -f ${bin_link_dir}/powerd || :
|
||||
${csudo} rm -f ${bin_link_dir}/powerdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmpower || :
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
|
||||
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
|
||||
|
||||
#Make link
|
||||
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
|
||||
[ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
|
||||
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
|
||||
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
|
||||
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
|
||||
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
|
||||
${csudo} mkdir -p ${nginx_dir}/logs
|
||||
${csudo} chmod 777 ${nginx_dir}/sbin/nginx
|
||||
fi
|
||||
}
|
||||
|
||||
function install_lib() {
|
||||
# Remove links
|
||||
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo} rm -rf ${v15_java_app_dir} || :
|
||||
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
|
||||
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
fi
|
||||
|
||||
#if [ "$verMode" == "cluster" ]; then
|
||||
# # Compatible with version 1.5
|
||||
# ${csudo} mkdir -p ${v15_java_app_dir}
|
||||
# ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
|
||||
# ${csudo} chmod 777 ${v15_java_app_dir} || :
|
||||
#fi
|
||||
|
||||
${csudo} ldconfig
|
||||
}
|
||||
|
||||
function install_header() {
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || :
|
||||
${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/*
|
||||
${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||
${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
#${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
|
||||
|
||||
if [ ! -f ${cfg_install_dir}/taos.cfg ]; then
|
||||
${csudo} mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir}
|
||||
${csudo} chmod 644 ${cfg_install_dir}/*
|
||||
fi
|
||||
|
||||
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
|
||||
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
|
||||
|
||||
[ ! -z $1 ] && return 0 || : # only install client
|
||||
|
||||
if ((${update_flag}==1)); then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ "$interactiveFqdn" == "no" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
#FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
|
||||
#FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
|
||||
#PORT_FORMAT="(/[1-6][0-9][0-9][0-9][0-9]?/)"
|
||||
#FQDN_PATTERN=":[0-9]{1,5}$"
|
||||
|
||||
# first fully qualified domain name (FQDN) for the PowerDB cluster system
|
||||
echo
|
||||
echo -e -n "${GREEN}Enter FQDN:port (like h1.powerdata.com:6030) of an existing PowerDB cluster node to join${NC}"
|
||||
echo
|
||||
echo -e -n "${GREEN}OR leave it blank to build one${NC}:"
|
||||
read firstEp
|
||||
while true; do
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
# check the format of the firstEp
|
||||
#if [[ $firstEp == $FQDN_PATTERN ]]; then
|
||||
# Write the first FQDN to configuration file
|
||||
${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg
|
||||
break
|
||||
#else
|
||||
# read -p "Please enter the correct FQDN:port: " firstEp
|
||||
#fi
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
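
To make the firstEp rewrite in install_config concrete, here is the same sed applied to a hypothetical commented-out line (the shipped taos.cfg contents are not part of this change, so the input is only an example):

```bash
echo '#firstEp    hostname:6030' | sed -r 's/#*\s*(firstEp\s*).*/\1h1.powerdata.com:6030/'
# prints: firstEp    h1.powerdata.com:6030
```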
|
||||
|
||||
|
||||
function install_log() {
|
||||
${csudo} rm -rf ${log_dir} || :
|
||||
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
|
||||
|
||||
${csudo} ln -s ${log_dir} ${install_main_dir}/log
|
||||
}
|
||||
|
||||
function install_data() {
|
||||
${csudo} mkdir -p ${data_dir}
|
||||
|
||||
${csudo} ln -s ${data_dir} ${install_main_dir}/data
|
||||
}
|
||||
|
||||
function install_connector() {
|
||||
${csudo} cp -rf ${script_dir}/connector/* ${install_main_dir}/connector
|
||||
}
|
||||
|
||||
function install_examples() {
|
||||
if [ -d ${script_dir}/examples ]; then
|
||||
${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
|
||||
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof powerd &> /dev/null; then
|
||||
${csudo} service powerd stop || :
|
||||
fi
|
||||
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} chkconfig --del powerd || :
|
||||
fi
|
||||
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} insserv -r powerd || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} update-rc.d -f powerd remove || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/powerd || :
|
||||
${csudo} rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo} init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service_on_sysvinit() {
|
||||
clean_service_on_sysvinit
|
||||
sleep 1
|
||||
|
||||
# Install powerd service
|
||||
|
||||
if ((${os_type}==1)); then
|
||||
${csudo} cp -f ${script_dir}/init.d/powerd.deb ${install_main_dir}/init.d/powerd
|
||||
${csudo} cp ${script_dir}/init.d/powerd.deb ${service_config_dir}/powerd && ${csudo} chmod a+x ${service_config_dir}/powerd
|
||||
${csudo} cp -f ${script_dir}/init.d/tarbitratord.deb ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo} cp ${script_dir}/init.d/tarbitratord.deb ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
|
||||
elif ((${os_type}==2)); then
|
||||
${csudo} cp -f ${script_dir}/init.d/powerd.rpm ${install_main_dir}/init.d/powerd
|
||||
${csudo} cp ${script_dir}/init.d/powerd.rpm ${service_config_dir}/powerd && ${csudo} chmod a+x ${service_config_dir}/powerd
|
||||
${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord
|
||||
${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord
|
||||
fi
|
||||
|
||||
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
|
||||
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
${csudo} chkconfig --add powerd || :
|
||||
${csudo} chkconfig --level 2345 powerd on || :
|
||||
${csudo} chkconfig --add tarbitratord || :
|
||||
${csudo} chkconfig --level 2345 tarbitratord on || :
|
||||
elif ((${initd_mod}==2)); then
|
||||
${csudo} insserv powerd || :
|
||||
${csudo} insserv -d powerd || :
|
||||
${csudo} insserv tarbitratord || :
|
||||
${csudo} insserv -d tarbitratord || :
|
||||
elif ((${initd_mod}==3)); then
|
||||
${csudo} update-rc.d powerd defaults || :
|
||||
${csudo} update-rc.d tarbitratord defaults || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
powerd_service_config="${service_config_dir}/powerd.service"
|
||||
if systemctl is-active --quiet powerd; then
|
||||
echo "PowerDB is running, stopping it..."
|
||||
${csudo} systemctl stop powerd &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable powerd &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${powerd_service_config}
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
if systemctl is-active --quiet tarbitratord; then
|
||||
echo "tarbitrator is running, stopping it..."
|
||||
${csudo} systemctl stop tarbitratord &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${tarbitratord_service_config}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_service_config="${service_config_dir}/nginxd.service"
|
||||
if systemctl is-active --quiet nginxd; then
|
||||
echo "Nginx for TDengine is running, stopping it..."
|
||||
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${nginx_service_config}
|
||||
fi
|
||||
}
|
||||
|
||||
# power:2345:respawn:/etc/init.d/powerd start
|
||||
|
||||
function install_service_on_systemd() {
|
||||
clean_service_on_systemd
|
||||
|
||||
powerd_service_config="${service_config_dir}/powerd.service"
|
||||
${csudo} bash -c "echo '[Unit]' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'Description=PowerDB server service' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'After=network-online.target' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'Wants=network-online.target' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo '[Service]' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'StandardOutput=null' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'Restart=always' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo '[Install]' >> ${powerd_service_config}"
|
||||
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${powerd_service_config}"
|
||||
${csudo} systemctl enable powerd
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/tarbitratord.service"
|
||||
${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'After=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Wants=network-online.target' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StandardOutput=null' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'Restart=always' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}"
|
||||
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}"
|
||||
#${csudo} systemctl enable tarbitratord
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_service_config="${service_config_dir}/nginxd.service"
|
||||
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'Description=Nginx For PowrDB Service' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
|
||||
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
|
||||
if ! ${csudo} systemctl enable nginxd &> /dev/null; then
|
||||
${csudo} systemctl daemon-reexec
|
||||
${csudo} systemctl enable nginxd
|
||||
fi
|
||||
${csudo} systemctl start nginxd
|
||||
fi
|
||||
}
|
||||
|
||||
function install_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
install_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
install_service_on_sysvinit
|
||||
else
|
||||
# must manually stop powerd
|
||||
kill_powerd
|
||||
fi
|
||||
}
|
||||
|
||||
vercomp () {
|
||||
if [[ $1 == $2 ]]; then
|
||||
return 0
|
||||
fi
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($2)
|
||||
# fill empty fields in ver1 with zeros
|
||||
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
|
||||
ver1[i]=0
|
||||
done
|
||||
|
||||
for ((i=0; i<${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]
|
||||
then
|
||||
# fill empty fields in ver2 with zeros
|
||||
ver2[i]=0
|
||||
fi
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]}))
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
if ((10#${ver1[i]} < 10#${ver2[i]}))
|
||||
then
|
||||
return 2
|
||||
fi
|
||||
done
|
||||
return 0
|
||||
}
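
A small standalone sketch of how vercomp's return codes read (0 equal, 1 first version newer, 2 first version older); inside the installer the result is only consumed through the case statement in is_version_compatible below:

```bash
vercomp 2.0.3 2.0.10 && echo "equal" || echo "result: $?"   # prints "result: 2": 2.0.3 is older (numeric, not lexical, compare)
vercomp 2.1.0 2.0.9  && echo "equal" || echo "result: $?"   # prints "result: 1": 2.1.0 is newer
```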
|
||||
|
||||
function is_version_compatible() {
|
||||
|
||||
curr_version=$(${bin_dir}/powerd -V | head -1 | cut -d ' ' -f 3)
|
||||
|
||||
min_compatible_version=$(${script_dir}/bin/powerd -V | head -1 | cut -d ' ' -f 5)
|
||||
|
||||
vercomp $curr_version $min_compatible_version
|
||||
case $? in
|
||||
0) return 0;;
|
||||
1) return 0;;
|
||||
2) return 1;;
|
||||
esac
|
||||
}
|
||||
|
||||
function update_PowerDB() {
|
||||
# Start to update
|
||||
if [ ! -e power.tar.gz ]; then
|
||||
echo "File power.tar.gz does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf power.tar.gz
|
||||
|
||||
# Check if version compatible
|
||||
if ! is_version_compatible; then
|
||||
echo -e "${RED}Version incompatible${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Start to update PowerDB...${NC}"
|
||||
# Stop the service if running
|
||||
if pidof powerd &> /dev/null; then
|
||||
if ((${service_mod}==0)); then
|
||||
${csudo} systemctl stop powerd || :
|
||||
elif ((${service_mod}==1)); then
|
||||
${csudo} service powerd stop || :
|
||||
else
|
||||
kill_powerd
|
||||
fi
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
install_main_path
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
if [ "$pagMode" != "lite" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
if [ -z $1 ]; then
|
||||
install_bin
|
||||
install_service
|
||||
install_config
|
||||
|
||||
openresty_work=false
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Check if openresty is installed
|
||||
# Check if nginx is installed successfully
|
||||
if type curl &> /dev/null; then
|
||||
if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
|
||||
echo -e "\033[44;32;1mNginx for PowerDB is updated successfully!${NC}"
|
||||
openresty_work=true
|
||||
else
|
||||
echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
#echo
|
||||
#echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
|
||||
if ((${service_mod}==0)); then
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
|
||||
elif ((${service_mod}==1)); then
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} service powerd start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ./powerd${NC}"
|
||||
fi
|
||||
|
||||
if [ ${openresty_work} = 'true' ]; then
|
||||
echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
|
||||
fi
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB is updated successfully!${NC}"
|
||||
else
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB client is updated successfully!${NC}"
|
||||
fi
|
||||
|
||||
rm -rf $(tar -tf power.tar.gz)
|
||||
}
|
||||
|
||||
function install_PowerDB() {
|
||||
# Start to install
|
||||
if [ ! -e power.tar.gz ]; then
|
||||
echo "File power.tar.gz does not exist"
|
||||
exit 1
|
||||
fi
|
||||
tar -zxf power.tar.gz
|
||||
|
||||
echo -e "${GREEN}Start to install PowerDB...${NC}"
|
||||
|
||||
install_main_path
|
||||
|
||||
if [ -z $1 ]; then
|
||||
install_data
|
||||
fi
|
||||
|
||||
install_log
|
||||
install_header
|
||||
install_lib
|
||||
if [ "$pagMode" != "lite" ]; then
|
||||
install_connector
|
||||
fi
|
||||
install_examples
|
||||
|
||||
if [ -z $1 ]; then # install service and client
|
||||
# For installing new
|
||||
install_bin
|
||||
install_service
|
||||
|
||||
openresty_work=false
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
# Check if nginx is installed successfully
|
||||
if type curl &> /dev/null; then
|
||||
if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
|
||||
echo -e "\033[44;32;1mNginx for PowerDB is installed successfully!${NC}"
|
||||
openresty_work=true
|
||||
else
|
||||
echo -e "\033[44;31;5mNginx for PowerDB does not work! Please try again!\033[0m"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
install_config
|
||||
|
||||
# Ask if to start the service
|
||||
#echo
|
||||
#echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
|
||||
echo
|
||||
echo -e "${GREEN_DARK}To configure PowerDB ${NC}: edit /etc/power/taos.cfg"
|
||||
if ((${service_mod}==0)); then
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} systemctl start powerd${NC}"
|
||||
elif ((${service_mod}==1)); then
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: ${csudo} service powerd start${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To start PowerDB ${NC}: powerd${NC}"
|
||||
fi
|
||||
|
||||
if [ ${openresty_work} = 'true' ]; then
|
||||
echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
|
||||
else
|
||||
echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}"
|
||||
fi
|
||||
|
||||
if [ ! -z "$firstEp" ]; then
|
||||
echo
|
||||
echo -e "${GREEN_DARK}Please run${NC}: power -h $firstEp${GREEN_DARK} to login into cluster, then${NC}"
|
||||
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
||||
echo
|
||||
fi
|
||||
echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}"
|
||||
echo
|
||||
else # Only install client
|
||||
install_bin
|
||||
install_config
|
||||
|
||||
echo
|
||||
echo -e "\033[44;32;1mPowerDB client is installed successfully!${NC}"
|
||||
fi
|
||||
|
||||
rm -rf $(tar -tf power.tar.gz)
|
||||
}
|
||||
|
||||
|
||||
## ==============================Main program starts from here============================
|
||||
if [ "$verType" == "server" ]; then
|
||||
# Install server and client
|
||||
if [ -x ${bin_dir}/powerd ]; then
|
||||
update_flag=1
|
||||
update_PowerDB
|
||||
else
|
||||
install_PowerDB
|
||||
fi
|
||||
elif [ "$verType" == "client" ]; then
|
||||
interactiveFqdn=no
|
||||
# Only install client
|
||||
if [ -x ${bin_dir}/power ]; then
|
||||
update_flag=1
|
||||
update_PowerDB client
|
||||
else
|
||||
install_PowerDB client
|
||||
fi
|
||||
else
|
||||
echo "please input correct verType"
|
||||
fi
|
|
@ -10,6 +10,7 @@ set -e
source_dir=$1
binary_dir=$2
osType=$3
verNumber=$4

if [ "$osType" != "Darwin" ]; then
    script_dir=$(dirname $(readlink -f "$0"))

@ -179,19 +180,18 @@ function install_lib() {
    ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
    ${csudo} rm -f ${lib64_link_dir}/libtaos.* || :

    versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c)
    if [ "$osType" != "Darwin" ]; then
        ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
        ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1
        ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
        ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
        ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so

        if [ -d "${lib64_link_dir}" ]; then
            ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib64_link_dir}/libtaos.so.1
            ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1
            ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
        fi
    else
        ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
        ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib
        ${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
        ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
        ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
    fi

@ -0,0 +1,75 @@
#!/bin/bash
#
# Generate arbitrator's tar.gz setup package for all os system

set -e
#set -x

curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8

script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -f ${script_dir}/../..)"

# create compressed install file.
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"

#package_name='linux'
if [ "$verMode" == "cluster" ]; then
    install_dir="${release_dir}/PowerDB-enterprise-arbitrator"
else
    install_dir="${release_dir}/PowerDB-arbitrator"
fi

# Directories and files.
bin_files="${build_dir}/bin/tarbitrator ${script_dir}/remove_arbi_power.sh"
install_files="${script_dir}/install_arbi_power.sh"

#header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord

# make directories.
mkdir -p ${install_dir} && cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install_arbi_power.sh || :
#mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc || :
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :

cd ${release_dir}

if [ "$verMode" == "cluster" ]; then
    pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
    pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
    echo "unknown verMode, neither cluster nor edge"
    exit 1
fi

if [ "$verType" == "beta" ]; then
    pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
    pkg_name=${pkg_name}
else
    echo "unknown verType, neither stable nor beta"
    exit 1
fi

tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
    echo "tar ${pkg_name}.tar.gz error !!!"
    exit $exitcode
fi

cd ${curr_dir}
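
A hypothetical invocation of this packaging script (the script name and all argument values are placeholders; the argument order is taken from the positional assignments at the top of the script):

```bash
./makearbi_power.sh /path/to/compile_dir 2.0.0.0 "$(date +%Y%m%d)" x64 Linux edge stable full
# expected result: release/PowerDB-arbitrator-2.0.0.0-Linux-x64.tar.gz
```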
@ -41,7 +41,7 @@ fi

if [ "$osType" != "Darwin" ]; then
    if [ "$pagMode" == "lite" ]; then
        strip ${build_dir}/bin/taosd
        #strip ${build_dir}/bin/taosd
        strip ${build_dir}/bin/taos
        bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
    else
@ -0,0 +1,197 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for linux client in all os system
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
else
|
||||
script_dir=`dirname $0`
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/../..
|
||||
fi
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/PowerDB-enterprise-client"
|
||||
else
|
||||
install_dir="${release_dir}/PowerDB-client"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
# if [ "$pagMode" == "lite" ]; then
|
||||
# strip ${build_dir}/bin/powerd
|
||||
# strip ${build_dir}/bin/power
|
||||
# bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
|
||||
# else
|
||||
# bin_files="${build_dir}/bin/power ${build_dir}/bin/powerdemo ${script_dir}/remove_client_power.sh ${script_dir}/set_core.sh"
|
||||
# fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
bin_files="${build_dir}/bin/power ${script_dir}/remove_client_power.sh"
|
||||
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
|
||||
fi
|
||||
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
|
||||
install_files="${script_dir}/install_client_power.sh"
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
|
||||
|
||||
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
|
||||
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
|
||||
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
|
||||
|
||||
mkdir -p ${install_dir}/bin
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/taos
|
||||
cp ${build_dir}/bin/taos ${install_dir}/bin/power
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
else
|
||||
cp ${build_dir}/bin/taos ${install_dir}/bin/power
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
|
||||
cp ${script_dir}/set_core.sh ${install_dir}/bin
|
||||
fi
|
||||
else
|
||||
cp ${bin_files} ${install_dir}/bin
|
||||
fi
|
||||
chmod a+x ${install_dir}/bin/* || :
|
||||
|
||||
cd ${install_dir}
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f power.tar.gz * --remove-files || :
|
||||
else
|
||||
tar -zcv -f power.tar.gz * || :
|
||||
mv power.tar.gz ..
|
||||
rm -rf ./*
|
||||
mv ../power.tar.gz .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
|
||||
mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
|
||||
fi
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client_power.sh >> install_client_power_temp.sh
|
||||
mv install_client_power_temp.sh ${install_dir}/install_client_power.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install_client_power.sh
|
||||
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/tests/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
|
||||
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
|
||||
fi
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
cp ${lib_files} ${install_dir}/driver
|
||||
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector
|
||||
fi
|
||||
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
|
||||
cp -r ${connector_dir}/python ${install_dir}/connector/
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
|
||||
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
|
||||
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
|
||||
fi
|
||||
# Copy release note
|
||||
# cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
pkg_name=${pkg_name}-${verType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stable or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
else
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
|
||||
mv "$(basename ${pkg_name}).tar.gz" ..
|
||||
rm -rf ./*
|
||||
mv ../"$(basename ${pkg_name}).tar.gz" .
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
|
@ -0,0 +1,215 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate tar.gz package for all os system
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
curr_dir=$(pwd)
|
||||
compile_dir=$1
|
||||
version=$2
|
||||
build_time=$3
|
||||
cpuType=$4
|
||||
osType=$5
|
||||
verMode=$6
|
||||
verType=$7
|
||||
pagMode=$8
|
||||
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/../..)"
|
||||
|
||||
# create compressed install file.
|
||||
build_dir="${compile_dir}/build"
|
||||
code_dir="${top_dir}/src"
|
||||
release_dir="${top_dir}/release"
|
||||
|
||||
#package_name='linux'
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
install_dir="${release_dir}/PowerDB-enterprise-server"
|
||||
else
|
||||
install_dir="${release_dir}/PowerDB-server"
|
||||
fi
|
||||
|
||||
# Directories and files.
|
||||
#if [ "$pagMode" == "lite" ]; then
|
||||
# strip ${build_dir}/bin/taosd
|
||||
# strip ${build_dir}/bin/taos
|
||||
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
|
||||
#else
|
||||
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
|
||||
#fi
|
||||
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
|
||||
cfg_dir="${top_dir}/packaging/cfg"
|
||||
install_files="${script_dir}/install_power.sh"
|
||||
nginx_dir="${code_dir}/../../enterprise/src/plugins/web"
|
||||
|
||||
# Init file
|
||||
#init_dir=${script_dir}/deb
|
||||
#if [ $package_type = "centos" ]; then
|
||||
# init_dir=${script_dir}/rpm
|
||||
#fi
|
||||
#init_files=${init_dir}/powerd
|
||||
# temp use rpm's powerd. TODO: later modify according to os type
|
||||
init_file_deb=${script_dir}/../deb/powerd
|
||||
init_file_rpm=${script_dir}/../rpm/powerd
|
||||
init_file_tarbitrator_deb=${script_dir}/../deb/tarbitratord
|
||||
init_file_tarbitrator_rpm=${script_dir}/../rpm/tarbitratord
|
||||
|
||||
# make directories.
|
||||
mkdir -p ${install_dir}
|
||||
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
|
||||
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
|
||||
|
||||
#mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
|
||||
mkdir -p ${install_dir}/bin
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
strip ${build_dir}/bin/taosd
|
||||
strip ${build_dir}/bin/taos
|
||||
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${script_dir}/remove_power.sh"
|
||||
cp ${build_dir}/bin/taos ${install_dir}/bin/power
|
||||
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
else
|
||||
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
|
||||
cp ${build_dir}/bin/taos ${install_dir}/bin/power
|
||||
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
|
||||
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
|
||||
cp ${script_dir}/set_core.sh ${install_dir}/bin
|
||||
fi
|
||||
chmod a+x ${install_dir}/bin/* || :
|
||||
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/powerd.deb
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/powerd.rpm
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || :
|
||||
mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || :
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove_power.sh >> remove_power_temp.sh
|
||||
mv remove_power_temp.sh ${install_dir}/bin/remove_power.sh
|
||||
|
||||
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
|
||||
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
|
||||
rm -rf ${install_dir}/nginxd/png
|
||||
|
||||
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/*.html
|
||||
sed -i "s/TDengine/PowerDB/g" ${install_dir}/nginxd/admin/js/*.js
|
||||
|
||||
sed -i '/dataDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
|
||||
sed -i '/logDir/ {s/taos/power/g}' ${install_dir}/cfg/taos.cfg
|
||||
sed -i "s/TDengine/PowerDB/g" ${install_dir}/cfg/taos.cfg
|
||||
|
||||
if [ "$cpuType" == "aarch64" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
|
||||
elif [ "$cpuType" == "aarch32" ]; then
|
||||
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
|
||||
fi
|
||||
rm -rf ${install_dir}/nginxd/sbin/arm
|
||||
fi
|
||||
|
||||
cd ${install_dir}
|
||||
tar -zcv -f power.tar.gz * --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar power.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
cp ${install_files} ${install_dir}
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install_power.sh >> install_power_temp.sh
|
||||
mv install_power_temp.sh ${install_dir}/install_power.sh
|
||||
fi
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_power.sh >> install_power_temp.sh
|
||||
mv install_power_temp.sh ${install_dir}/install_power.sh
|
||||
fi
|
||||
chmod a+x ${install_dir}/install_power.sh
|
||||
|
||||
# Copy example code
|
||||
mkdir -p ${install_dir}/examples
|
||||
examples_dir="${top_dir}/tests/examples"
|
||||
cp -r ${examples_dir}/c ${install_dir}/examples
|
||||
sed -i '/passwd/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
|
||||
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/c/*.c
|
||||
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp -r ${examples_dir}/JDBC ${install_dir}/examples
|
||||
cp -r ${examples_dir}/matlab ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/matlab/TDengineDemo.m
|
||||
cp -r ${examples_dir}/python ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/python/read_example.py
|
||||
cp -r ${examples_dir}/R ${install_dir}/examples
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/examples/R/command.txt
|
||||
cp -r ${examples_dir}/go ${install_dir}/examples
|
||||
sed -i '/root/ {s/taosdata/powerdb/g}' ${install_dir}/examples/go/src/taosapp/taosapp.go
|
||||
fi
|
||||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver
|
||||
cp ${lib_files} ${install_dir}/driver
|
||||
|
||||
# Copy connector
|
||||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
cp ${build_dir}/lib/*.jar ${install_dir}/connector
|
||||
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
|
||||
cp -r ${connector_dir}/python ${install_dir}/connector/
|
||||
cp -r ${connector_dir}/go ${install_dir}/connector
|
||||
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
|
||||
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
|
||||
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
|
||||
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
|
||||
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
|
||||
fi
|
||||
# Copy release note
|
||||
# cp ${script_dir}/release_note ${install_dir}
|
||||
|
||||
# exit 1
|
||||
|
||||
cd ${release_dir}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
elif [ "$verMode" == "edge" ]; then
|
||||
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
|
||||
else
|
||||
echo "unknow verMode, nor cluster or edge"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$pagMode" == "lite" ]; then
|
||||
pkg_name=${pkg_name}-Lite
|
||||
fi
|
||||
|
||||
if [ "$verType" == "beta" ]; then
|
||||
pkg_name=${pkg_name}-${verType}
|
||||
elif [ "$verType" == "stable" ]; then
|
||||
pkg_name=${pkg_name}
|
||||
else
|
||||
echo "unknow verType, nor stabel or beta"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
|
||||
exitcode=$?
|
||||
if [ "$exitcode" != "0" ]; then
|
||||
echo "tar ${pkg_name}.tar.gz error !!!"
|
||||
exit $exitcode
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
|
@ -102,26 +102,22 @@ function clean_log() {
|
|||
|
||||
function clean_service_on_systemd() {
|
||||
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
|
||||
|
||||
if systemctl is-active --quiet ${taos_service_name}; then
|
||||
echo "TDengine taosd is running, stopping it..."
|
||||
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${taosd_service_config}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
|
||||
|
||||
if systemctl is-active --quiet ${tarbitrator_service_name}; then
|
||||
echo "TDengine tarbitrator is running, stopping it..."
|
||||
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${tarbitratord_service_config}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
|
||||
if [ -d ${bin_dir}/web ]; then
|
||||
if systemctl is-active --quiet ${nginx_service_name}; then
|
||||
|
@ -129,7 +125,6 @@ function clean_service_on_systemd() {
|
|||
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${nginx_service_config}
|
||||
fi
|
||||
fi
|
||||
|
@ -227,3 +222,4 @@ elif echo $osinfo | grep -qwi "centos" ; then
|
|||
fi
|
||||
|
||||
echo -e "${GREEN}TDengine is removed successfully!${NC}"
|
||||
echo
|
|
@ -0,0 +1,130 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the service and uninstall PowerDB's arbitrator
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/tarbitrator"
|
||||
bin_link_dir="/usr/bin"
|
||||
#inc_link_dir="/usr/include"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
tarbitrator_service_name="tarbitratord"
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_log() {
|
||||
# Remove link
|
||||
${csudo} rm -rf /arbitrator.log || :
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
|
||||
|
||||
if systemctl is-active --quiet ${tarbitrator_service_name}; then
|
||||
echo "PowerDB tarbitrator is running, stopping it..."
|
||||
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${tarbitratord_service_config}
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
echo "PowerDB's tarbitrator is running, stopping it..."
|
||||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo} init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
clean_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
clean_service_on_sysvinit
|
||||
else
|
||||
# must manually stop
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop service and disable booting start.
|
||||
clean_service
|
||||
# Remove binary file and links
|
||||
clean_bin
|
||||
# Remove header file.
|
||||
##clean_header
|
||||
# Remove log file
|
||||
clean_log
|
||||
|
||||
${csudo} rm -rf ${install_main_dir}
|
||||
|
||||
echo -e "${GREEN}PowerDB's arbitrator is removed successfully!${NC}"
|
||||
echo
|
|
@ -39,6 +39,7 @@ function clean_bin() {
|
|||
${csudo} rm -f ${bin_link_dir}/taos || :
|
||||
${csudo} rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmtaos || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
|
@ -80,3 +81,4 @@ clean_config
|
|||
${csudo} rm -rf ${install_main_dir}
|
||||
|
||||
echo -e "${GREEN}TDengine client is removed successfully!${NC}"
|
||||
echo
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the client and uninstall database, but retain the config and log files.
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/power"
|
||||
|
||||
log_link_dir="/usr/local/power/log"
|
||||
cfg_link_dir="/usr/local/power/cfg"
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
|
||||
|
||||
# v1.5 jar dir
|
||||
#v15_java_app_dir="/usr/local/lib/power"
|
||||
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
function kill_client() {
pid=$(pidof power)
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
|
||||
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${bin_link_dir}/power || :
|
||||
${csudo} rm -f ${bin_link_dir}/powerdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmpower || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo} rm -rf ${v15_java_app_dir} || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_config() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${cfg_link_dir}/* || :
|
||||
}
|
||||
|
||||
function clean_log() {
|
||||
# Remove link
|
||||
${csudo} rm -rf ${log_link_dir} || :
|
||||
}
|
||||
|
||||
# Stop client.
|
||||
kill_client
|
||||
# Remove binary file and links
|
||||
clean_bin
|
||||
# Remove header file.
|
||||
clean_header
|
||||
# Remove lib file
|
||||
clean_lib
|
||||
# Remove link log directory
|
||||
clean_log
|
||||
# Remove link configuration file
|
||||
clean_config
|
||||
|
||||
${csudo} rm -rf ${install_main_dir}
|
||||
|
||||
echo -e "${GREEN}PowerDB client is removed successfully!${NC}"
|
||||
echo
|
|
@ -0,0 +1,226 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Script to stop the service and uninstall TDengine, but retain the config, data and log files.
|
||||
|
||||
set -e
|
||||
#set -x
|
||||
|
||||
verMode=edge
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[1;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
#install main path
|
||||
install_main_dir="/usr/local/power"
|
||||
data_link_dir="/usr/local/power/data"
|
||||
log_link_dir="/usr/local/power/log"
|
||||
cfg_link_dir="/usr/local/power/cfg"
|
||||
bin_link_dir="/usr/bin"
|
||||
lib_link_dir="/usr/lib"
|
||||
lib64_link_dir="/usr/lib64"
|
||||
inc_link_dir="/usr/include"
|
||||
install_nginxd_dir="/usr/local/nginxd"
|
||||
|
||||
# v1.5 jar dir
|
||||
#v15_java_app_dir="/usr/local/lib/power"
|
||||
|
||||
service_config_dir="/etc/systemd/system"
|
||||
power_service_name="powerd"
|
||||
tarbitrator_service_name="tarbitratord"
|
||||
nginx_service_name="nginxd"
|
||||
csudo=""
|
||||
if command -v sudo > /dev/null; then
|
||||
csudo="sudo"
|
||||
fi
|
||||
|
||||
initd_mod=0
|
||||
service_mod=2
|
||||
if pidof systemd &> /dev/null; then
|
||||
service_mod=0
|
||||
elif $(which service &> /dev/null); then
|
||||
service_mod=1
|
||||
service_config_dir="/etc/init.d"
|
||||
if $(which chkconfig &> /dev/null); then
|
||||
initd_mod=1
|
||||
elif $(which insserv &> /dev/null); then
|
||||
initd_mod=2
|
||||
elif $(which update-rc.d &> /dev/null); then
|
||||
initd_mod=3
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
else
|
||||
service_mod=2
|
||||
fi
|
||||
|
||||
function kill_powerd() {
|
||||
pid=$(ps -ef | grep "powerd" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_tarbitrator() {
|
||||
pid=$(ps -ef | grep "tarbitrator" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo} kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
function clean_bin() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${bin_link_dir}/power || :
|
||||
${csudo} rm -f ${bin_link_dir}/powerd || :
|
||||
${csudo} rm -f ${bin_link_dir}/powerdemo || :
|
||||
${csudo} rm -f ${bin_link_dir}/rmpower || :
|
||||
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
|
||||
${csudo} rm -f ${bin_link_dir}/set_core || :
|
||||
}
|
||||
|
||||
function clean_lib() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
#${csudo} rm -rf ${v15_java_app_dir} || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${inc_link_dir}/taos.h || :
|
||||
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
|
||||
}
|
||||
|
||||
function clean_config() {
|
||||
# Remove link
|
||||
${csudo} rm -f ${cfg_link_dir}/* || :
|
||||
}
|
||||
|
||||
function clean_log() {
|
||||
# Remove link
|
||||
${csudo} rm -rf ${log_link_dir} || :
|
||||
}
|
||||
|
||||
function clean_service_on_systemd() {
|
||||
power_service_config="${service_config_dir}/${power_service_name}.service"
|
||||
if systemctl is-active --quiet ${power_service_name}; then
|
||||
echo "PowerDB powerd is running, stopping it..."
|
||||
${csudo} systemctl stop ${power_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${power_service_name} &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${power_service_config}
|
||||
|
||||
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
|
||||
if systemctl is-active --quiet ${tarbitrator_service_name}; then
|
||||
echo "TDengine tarbitrator is running, stopping it..."
|
||||
${csudo} systemctl stop ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${tarbitrator_service_name} &> /dev/null || echo &> /dev/null
|
||||
${csudo} rm -f ${tarbitratord_service_config}
|
||||
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
|
||||
if [ -d ${bin_dir}/web ]; then
|
||||
if systemctl is-active --quiet ${nginx_service_name}; then
|
||||
echo "Nginx for TDengine is running, stopping it..."
|
||||
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
|
||||
fi
|
||||
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
|
||||
|
||||
${csudo} rm -f ${nginx_service_config}
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service_on_sysvinit() {
|
||||
#restart_config_str="power:2345:respawn:${service_config_dir}/powerd start"
|
||||
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
|
||||
|
||||
if pidof powerd &> /dev/null; then
|
||||
echo "PowerDB powerd is running, stopping it..."
|
||||
${csudo} service powerd stop || :
|
||||
fi
|
||||
|
||||
if pidof tarbitrator &> /dev/null; then
|
||||
echo "PowerDB tarbitrator is running, stopping it..."
|
||||
${csudo} service tarbitratord stop || :
|
||||
fi
|
||||
|
||||
if ((${initd_mod}==1)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} chkconfig --del powerd || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} chkconfig --del tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==2)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} insserv -r powerd || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} insserv -r tarbitratord || :
|
||||
fi
|
||||
elif ((${initd_mod}==3)); then
|
||||
if [ -e ${service_config_dir}/powerd ]; then
|
||||
${csudo} update-rc.d -f powerd remove || :
|
||||
fi
|
||||
if [ -e ${service_config_dir}/tarbitratord ]; then
|
||||
${csudo} update-rc.d -f tarbitratord remove || :
|
||||
fi
|
||||
fi
|
||||
|
||||
${csudo} rm -f ${service_config_dir}/powerd || :
|
||||
${csudo} rm -f ${service_config_dir}/tarbitratord || :
|
||||
|
||||
if $(which init &> /dev/null); then
|
||||
${csudo} init q || :
|
||||
fi
|
||||
}
|
||||
|
||||
function clean_service() {
|
||||
if ((${service_mod}==0)); then
|
||||
clean_service_on_systemd
|
||||
elif ((${service_mod}==1)); then
|
||||
clean_service_on_sysvinit
|
||||
else
|
||||
# must manually stop powerd and tarbitrator
|
||||
kill_powerd
|
||||
kill_tarbitrator
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop service and disable booting start.
|
||||
clean_service
|
||||
# Remove binary file and links
|
||||
clean_bin
|
||||
# Remove header file.
|
||||
clean_header
|
||||
# Remove lib file
|
||||
clean_lib
|
||||
# Remove link log directory
|
||||
clean_log
|
||||
# Remove link configuration file
|
||||
clean_config
|
||||
# Remove data link directory
|
||||
${csudo} rm -rf ${data_link_dir} || :
|
||||
|
||||
${csudo} rm -rf ${install_main_dir}
|
||||
${csudo} rm -rf ${install_nginxd_dir}
|
||||
if [[ -e /etc/os-release ]]; then
|
||||
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
|
||||
else
|
||||
osinfo=""
|
||||
fi
|
||||
|
||||
#if echo $osinfo | grep -qwi "ubuntu" ; then
|
||||
## echo "this is ubuntu system"
|
||||
# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
|
||||
#elif echo $osinfo | grep -qwi "debian" ; then
|
||||
## echo "this is debian system"
|
||||
# ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
|
||||
#elif echo $osinfo | grep -qwi "centos" ; then
|
||||
## echo "this is centos system"
|
||||
# ${csudo} rpm -e --noscripts tdengine || :
|
||||
#fi
|
||||
|
||||
echo -e "${GREEN}PowerDB is removed successfully!${NC}"
|
||||
echo
|
|
@ -5,7 +5,9 @@ if [ ! -d /var/lib/taos ]; then
|
|||
fi
|
||||
|
||||
if [ ! -d /var/log/taos ]; then
|
||||
mkdir -p -m777 /var/log/taos
|
||||
mkdir -p --mode=777 /var/log/taos
|
||||
else
|
||||
chmod 777 /var/log/taos
|
||||
fi
|
||||
|
||||
if [ ! -d /etc/taos ]; then
|
||||
|
@ -13,5 +15,8 @@ if [ ! -d /etc/taos ]; then
|
|||
fi
|
||||
|
||||
if [ ! -f /etc/taos/taos.cfg ]; then
|
||||
if [ ! -d /etc/taos ]; then
|
||||
mkdir -p /etc/taos
|
||||
fi
|
||||
cp $SNAP/etc/taos/taos.cfg /etc/taos/taos.cfg
|
||||
fi
|
||||
|
|
|
@ -15,11 +15,12 @@ case "$SNAP_USER_COMMON" in
|
|||
*) COMMON=$SNAP_USER_COMMON ;;
|
||||
esac
|
||||
|
||||
if [ -d /etc/taos ]; then
|
||||
CONFIG_FILE="/etc/taos"
|
||||
else
|
||||
CONFIG_FILE="$SNAP/etc/taos"
|
||||
if [ ! -f $SNAP_DATA/etc/taos/taos.cfg ]; then
|
||||
if [ ! -d $SNAP_DATA/etc/taos ]; then
|
||||
mkdir -p $SNAP_DATA/etc/taos
|
||||
fi
|
||||
cp $SNAP/etc/taos/taos.cfg $SNAP_DATA/etc/taos
|
||||
fi
|
||||
|
||||
# Launch the snap
|
||||
$SNAP/usr/bin/taosd -c $CONFIG_FILE $@
|
||||
$SNAP/usr/bin/taosd -c /etc/taos $@
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
name: tdengine
|
||||
base: core18 # the base snap is the execution environment for this snap
|
||||
version: '2.0.0.6' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
icon: snap/gui/t-dengine.svg
|
||||
summary: an open-source big data platform designed and optimized for IoT.
|
||||
description: |
|
||||
TDengine is an open-source big data platform designed and optimized for Internet of Things (IoT), Connected Vehicles, and Industrial IoT. Besides the 10x faster time-series database, it provides caching, stream computing, message queuing and other functionalities to reduce the complexity and costs of development and operations.
|
||||
|
||||
grade: stable
|
||||
confinement: classic
|
||||
confinement: strict
|
||||
|
||||
apps:
|
||||
tdengine:
|
||||
|
@ -24,7 +24,9 @@ apps:
|
|||
command: taoswrapper.sh
|
||||
plugs:
|
||||
- network
|
||||
- system-observe
|
||||
- systemfiles
|
||||
- historyfile
|
||||
|
||||
taosdemo:
|
||||
command: usr/bin/taosdemo
|
||||
|
@ -32,11 +34,19 @@ apps:
|
|||
- network
|
||||
|
||||
plugs:
|
||||
historyfile:
|
||||
interface: personal-files
|
||||
read:
|
||||
- $HOME/.taos_history
|
||||
write:
|
||||
- $HOME/.taos_history
|
||||
|
||||
systemfiles:
|
||||
interface: system-files
|
||||
read:
|
||||
- /etc/taos
|
||||
- /var/lib/taos
|
||||
- /var/log/taos
|
||||
- /tmp
|
||||
write:
|
||||
- /var/log/taos
|
||||
|
@ -77,7 +87,7 @@ parts:
|
|||
mkdir -p $SNAPCRAFT_STAGE/var/lib/taos
|
||||
fi
|
||||
if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
|
||||
mkdir -p $SNAPCRAFT_STAGE/var/log/taos
|
||||
mkdir -p --mode=777 $SNAPCRAFT_STAGE/var/log/taos
|
||||
fi
|
||||
|
||||
prime:
|
||||
|
@ -85,16 +95,16 @@ parts:
|
|||
- usr/bin/taosd
|
||||
- usr/bin/taos
|
||||
- usr/bin/taosdemo
|
||||
- usr/lib/libtaos.so.2.0.0.6
|
||||
- usr/lib/libtaos.so.2.0.2.0
|
||||
- usr/lib/libtaos.so.1
|
||||
- usr/lib/libtaos.so
|
||||
|
||||
override-prime: |
|
||||
snapcraftctl prime
|
||||
if [ ! -d $SNAPCRAFT_STAGE/var/lib/taos ]; then
|
||||
if [ ! -d $SNAPCRAFT_PRIME/var/lib/taos ]; then
|
||||
cp -rf $SNAPCRAFT_STAGE/var/lib/taos $SNAPCRAFT_PRIME
|
||||
fi
|
||||
if [ ! -d $SNAPCRAFT_STAGE/var/log/taos ]; then
|
||||
if [ ! -d $SNAPCRAFT_PRIME/var/log/taos ]; then
|
||||
cp -rf $SNAPCRAFT_STAGE/var/log/taos $SNAPCRAFT_PRIME
|
||||
fi
|
||||
|
||||
|
@ -103,11 +113,10 @@ layout:
|
|||
bind: $SNAP_DATA/var/lib/taos
|
||||
/var/log/taos:
|
||||
bind: $SNAP_DATA/var/log/taos
|
||||
/etc/taos/taos.cfg:
|
||||
bind-file: $SNAP_DATA/etc/taos/taos.cfg
|
||||
/etc/taos:
|
||||
bind: $SNAP_DATA/etc/taos
|
||||
|
||||
|
||||
hooks:
|
||||
install:
|
||||
plugs: [systemfiles]
|
||||
|
||||
plugs: [systemfiles, historyfile]
|
||||
|
|
|
@ -23,12 +23,8 @@ IF (TD_LINUX)
|
|||
#set version of .so
|
||||
#VERSION so version
|
||||
#SOVERSION api version
|
||||
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
|
||||
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
|
||||
OUTPUT_VARIABLE
|
||||
VERSION_INFO)
|
||||
MESSAGE(STATUS "build version ${VERSION_INFO}")
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
|
||||
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
|
||||
|
||||
ADD_SUBDIRECTORY(tests)
|
||||
|
||||
|
@ -65,11 +61,7 @@ ELSEIF (TD_DARWIN)
|
|||
#set version of .so
|
||||
#VERSION so version
|
||||
#SOVERSION api version
|
||||
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
|
||||
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
|
||||
OUTPUT_VARIABLE
|
||||
VERSION_INFO)
|
||||
MESSAGE(STATUS "build version ${VERSION_INFO}")
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
|
||||
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
|
||||
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
|
||||
ENDIF ()
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
|
|||
|
||||
SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index);
|
||||
|
||||
int32_t tscHandleMasterJoinQuery(SSqlObj* pSql);
|
||||
void tscHandleMasterJoinQuery(SSqlObj* pSql);
|
||||
|
||||
int32_t tscHandleMasterSTableQuery(SSqlObj *pSql);
|
||||
|
||||
|
|
|
@ -39,7 +39,6 @@ extern "C" {
|
|||
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\
|
||||
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
|
||||
|
||||
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
|
||||
|
||||
typedef struct SParsedColElem {
|
||||
int16_t colIndex;
|
||||
|
@ -70,6 +69,8 @@ typedef struct SJoinSupporter {
|
|||
SSubqueryState* pState;
|
||||
SSqlObj* pObj; // parent SqlObj
|
||||
int32_t subqueryIndex; // index of sub query
|
||||
char intervalTimeUnit;
|
||||
char slidingTimeUnit;
|
||||
int64_t intervalTime; // interval time
|
||||
int64_t slidingTime; // sliding time
|
||||
SLimitVal limit; // limit info
|
||||
|
@ -186,7 +187,7 @@ SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functi
|
|||
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
|
||||
|
||||
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
|
||||
void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
|
||||
int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
|
||||
void tscSqlExprInfoDestroy(SArray* pExprInfo);
|
||||
|
||||
SColumn* tscColumnClone(const SColumn* src);
|
||||
|
@ -204,7 +205,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
|
|||
SCond* tsGetSTableQueryCond(STagCond* pCond, uint64_t uid);
|
||||
void tsSetSTableQueryCond(STagCond* pTagCond, uint64_t uid, SBufferWriter* bw);
|
||||
|
||||
void tscTagCondCopy(STagCond* dest, const STagCond* src);
|
||||
int32_t tscTagCondCopy(STagCond* dest, const STagCond* src);
|
||||
void tscTagCondRelease(STagCond* pCond);
|
||||
|
||||
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);
|
||||
|
|
|
@ -229,8 +229,9 @@ typedef struct STableDataBlocks {
|
|||
|
||||
typedef struct SQueryInfo {
|
||||
int16_t command; // the command may be different for each subclause, so keep it separately.
|
||||
uint32_t type; // query/insert type
|
||||
char intervalTimeUnit;
|
||||
char slidingTimeUnit;
|
||||
uint32_t type; // query/insert type
|
||||
STimeWindow window; // query time window
|
||||
int64_t intervalTime; // aggregation time interval
|
||||
int64_t slidingTime; // sliding window in mseconds
|
||||
|
@ -366,6 +367,8 @@ typedef struct SSqlStream {
|
|||
uint32_t streamId;
|
||||
char listed;
|
||||
bool isProject;
|
||||
char intervalTimeUnit;
|
||||
char slidingTimeUnit;
|
||||
int16_t precision;
|
||||
int64_t num; // number of computing count
|
||||
|
||||
|
@ -379,7 +382,7 @@ typedef struct SSqlStream {
|
|||
int64_t ctime; // stream created time
|
||||
int64_t stime; // stream next executed time
|
||||
int64_t etime; // stream end query time, when time is larger than etime, the stream will be closed
|
||||
int64_t interval;
|
||||
int64_t intervalTime;
|
||||
int64_t slidingTime;
|
||||
void * pTimer;
|
||||
|
||||
|
@ -455,6 +458,7 @@ bool tscResultsetFetchCompleted(TAOS_RES *result);
|
|||
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
|
||||
|
||||
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
|
||||
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql);
|
||||
|
||||
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
|
||||
|
||||
|
@ -468,7 +472,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
|
|||
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
|
||||
|
||||
// user defined constant value output columns
|
||||
if (pInfo->pSqlExpr->colInfo.flag == TSDB_COL_UDC) {
|
||||
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
|
||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
||||
pData = pInfo->pSqlExpr->param[1].pz;
|
||||
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
|
||||
|
|
|
@ -50,7 +50,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
|
|||
pSql->sqlstr = calloc(1, sqlLen + 1);
|
||||
if (pSql->sqlstr == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
tscQueueAsyncError(pSql->fp, pSql->param, TSDB_CODE_TSC_OUT_OF_MEMORY);
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscQueueAsyncRes(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -94,7 +95,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
|
|||
SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
|
||||
if (pSql == NULL) {
|
||||
tscError("failed to malloc sqlObj");
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_OUT_OF_MEMORY);
|
||||
return;
|
||||
}
|
||||
|
@ -191,7 +191,7 @@ void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRo
|
|||
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
|
||||
}
|
||||
|
||||
void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) {
|
||||
void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
||||
SSqlObj *pSql = (SSqlObj *)taosa;
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscError("sql object is NULL");
|
||||
|
@ -209,6 +209,8 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
|
|||
if (pRes->qhandle == 0) {
|
||||
tscError("qhandle is NULL");
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
|
||||
pSql->param = param;
|
||||
|
||||
tscQueueAsyncRes(pSql);
|
||||
return;
|
||||
}
|
||||
|
@ -269,7 +271,10 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
|
|||
|
||||
if (pRes->qhandle == 0) {
|
||||
tscError("qhandle is NULL");
|
||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_INVALID_QHANDLE);
|
||||
pSql->param = param;
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
|
||||
|
||||
tscQueueAsyncRes(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -352,36 +357,17 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
|
|||
|
||||
void tscProcessAsyncRes(SSchedMsg *pMsg) {
|
||||
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
|
||||
// SSqlCmd *pCmd = &pSql->cmd;
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
// void *taosres = pSql;
|
||||
|
||||
// pCmd may be released, so cache pCmd->command
|
||||
// int cmd = pCmd->command;
|
||||
// int code = pRes->code;
|
||||
|
||||
// in case of async insert, restore the user specified callback function
|
||||
// bool shouldFree = tscShouldBeFreed(pSql);
|
||||
|
||||
// if (pCmd->command == TSDB_SQL_INSERT) {
|
||||
// assert(pSql->fp != NULL);
|
||||
assert(pSql->fp != NULL && pSql->fetchFp != NULL);
|
||||
// }
|
||||
|
||||
// if (pSql->fp) {
|
||||
pSql->fp = pSql->fetchFp;
|
||||
(*pSql->fp)(pSql->param, pSql, pRes->code);
|
||||
// }
|
||||
|
||||
// if (shouldFree) {
|
||||
// tscDebug("%p sqlObj is automatically freed in async res", pSql);
|
||||
// tscFreeSqlObj(pSql);
|
||||
// }
|
||||
}
|
||||
|
||||
// this function will be executed by queue task threads, so the terrno is not valid
|
||||
static void tscProcessAsyncError(SSchedMsg *pMsg) {
|
||||
void (*fp)() = pMsg->ahandle;
|
||||
terrno = *(int32_t*) pMsg->msg;
|
||||
(*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg);
|
||||
}
|
||||
|
||||
|
|
|
@ -1648,9 +1648,10 @@ static void last_function(SQLFunctionCtx *pCtx) {
|
|||
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
|
||||
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
if (!pCtx->requireNull) {
|
||||
continue;
|
||||
}
|
||||
|
||||
}
|
||||
memcpy(pCtx->aOutputBuf, data, pCtx->inputBytes);
|
||||
|
||||
TSKEY ts = pCtx->ptsList[i];
|
||||
|
@ -1721,8 +1722,10 @@ static void last_dist_function(SQLFunctionCtx *pCtx) {
|
|||
for (int32_t i = pCtx->size - 1; i >= 0; --i) {
|
||||
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
|
||||
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
|
||||
if (!pCtx->requireNull) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
last_data_assign_impl(pCtx, data, i);
|
||||
|
||||
|
@ -2422,24 +2425,14 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) {
|
|||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////
|
||||
static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
|
||||
const int32_t MAX_AVAILABLE_BUFFER_SIZE = 1 << 20; // 1MB
|
||||
const int32_t NUMOFCOLS = 1;
|
||||
|
||||
if (!function_setup(pCtx)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
SSchema field[1] = { { (uint8_t)pCtx->inputType, "dummyCol", 0, pCtx->inputBytes } };
|
||||
|
||||
SColumnModel *pModel = createColumnModel(field, 1, 1000);
|
||||
int32_t orderIdx = 0;
|
||||
|
||||
// tOrderDesc object
|
||||
tOrderDescriptor *pDesc = tOrderDesCreate(&orderIdx, NUMOFCOLS, pModel, TSDB_ORDER_DESC);
|
||||
|
||||
((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
|
||||
tMemBucketCreate(1024, MAX_AVAILABLE_BUFFER_SIZE, pCtx->inputBytes, pCtx->inputType, pDesc);
|
||||
tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -2485,15 +2478,13 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
|
|||
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
tMemBucket * pMemBucket = ((SPercentileInfo *)pResInfo->interResultBuf)->pMemBucket;
|
||||
|
||||
if (pMemBucket->numOfElems > 0) { // check for null
|
||||
if (pMemBucket->total > 0) { // check for null
|
||||
*(double *)pCtx->aOutputBuf = getPercentile(pMemBucket, v);
|
||||
} else {
|
||||
setNull(pCtx->aOutputBuf, pCtx->outputType, pCtx->outputBytes);
|
||||
}
|
||||
|
||||
tOrderDescDestroy(pMemBucket->pOrderDesc);
|
||||
tMemBucketDestroy(pMemBucket);
|
||||
|
||||
doFinalizer(pCtx);
|
||||
}
|
||||
|
||||
|
|
|
@ -274,7 +274,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
|
|||
return tscSetValueToResObj(pSql, rowLen);
|
||||
}
|
||||
|
||||
static void tscProcessCurrentUser(SSqlObj *pSql) {
|
||||
static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
|
@ -282,14 +282,20 @@ static void tscProcessCurrentUser(SSqlObj *pSql) {
|
|||
pExpr->resType = TSDB_DATA_TYPE_BINARY;
|
||||
|
||||
char* vx = calloc(1, pExpr->resBytes);
|
||||
if (vx == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
size_t size = sizeof(pSql->pTscObj->user);
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, size);
|
||||
|
||||
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
|
||||
free(vx);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tscProcessCurrentDB(SSqlObj *pSql) {
|
||||
static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
|
||||
char db[TSDB_DB_NAME_LEN] = {0};
|
||||
extractDBName(pSql->pTscObj->db, db);
|
||||
|
||||
|
@ -302,6 +308,10 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
|
|||
pExpr->resBytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE;
|
||||
|
||||
char* vx = calloc(1, pExpr->resBytes);
|
||||
if (vx == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
if (t == 0) {
|
||||
setVardataNull(vx, TSDB_DATA_TYPE_BINARY);
|
||||
} else {
|
||||
|
@ -310,9 +320,11 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
|
|||
|
||||
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
|
||||
free(vx);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tscProcessServerVer(SSqlObj *pSql) {
|
||||
static int32_t tscProcessServerVer(SSqlObj *pSql) {
|
||||
const char* v = pSql->pTscObj->sversion;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
|
||||
|
@ -323,13 +335,18 @@ static void tscProcessServerVer(SSqlObj *pSql) {
|
|||
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
|
||||
|
||||
char* vx = calloc(1, pExpr->resBytes);
|
||||
if (vx == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
STR_WITH_SIZE_TO_VARSTR(vx, v, (VarDataLenT)t);
|
||||
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
|
||||
|
||||
taosTFree(vx);
|
||||
free(vx);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tscProcessClientVer(SSqlObj *pSql) {
|
||||
static int32_t tscProcessClientVer(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
|
@ -339,23 +356,28 @@ static void tscProcessClientVer(SSqlObj *pSql) {
|
|||
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
|
||||
|
||||
char* v = calloc(1, pExpr->resBytes);
|
||||
if (v == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
STR_WITH_SIZE_TO_VARSTR(v, version, (VarDataLenT)t);
|
||||
tscSetLocalQueryResult(pSql, v, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
|
||||
|
||||
taosTFree(v);
|
||||
free(v);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void tscProcessServStatus(SSqlObj *pSql) {
|
||||
static int32_t tscProcessServStatus(SSqlObj *pSql) {
|
||||
STscObj* pObj = pSql->pTscObj;
|
||||
|
||||
if (pObj->pHb != NULL) {
|
||||
if (pObj->pHb->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
|
||||
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
|
||||
return;
|
||||
return pSql->res.code;
|
||||
}
|
||||
} else {
|
||||
if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
|
||||
return;
|
||||
return pSql->res.code;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -364,6 +386,7 @@ static void tscProcessServStatus(SSqlObj *pSql) {
|
|||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
int32_t val = 1;
|
||||
tscSetLocalQueryResult(pSql, (char*) &val, pExpr->aliasName, TSDB_DATA_TYPE_INT, sizeof(int32_t));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength) {
|
||||
|
@ -393,37 +416,39 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
|
|||
|
||||
int tscProcessLocalCmd(SSqlObj *pSql) {
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
if (pCmd->command == TSDB_SQL_CFG_LOCAL) {
|
||||
pSql->res.code = (uint8_t)taosCfgDynamicOptions(pCmd->payload);
|
||||
pRes->code = (uint8_t)taosCfgDynamicOptions(pCmd->payload);
|
||||
} else if (pCmd->command == TSDB_SQL_DESCRIBE_TABLE) {
|
||||
pSql->res.code = (uint8_t)tscProcessDescribeTable(pSql);
|
||||
pRes->code = (uint8_t)tscProcessDescribeTable(pSql);
|
||||
} else if (pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
|
||||
/*
|
||||
* set the qhandle to be 1 in order to pass the qhandle check, and to call partial release function to
|
||||
* free allocated resources and remove the SqlObj from sql query linked list
|
||||
*/
|
||||
pSql->res.qhandle = 0x1;
|
||||
pSql->res.numOfRows = 0;
|
||||
pRes->qhandle = 0x1;
|
||||
pRes->numOfRows = 0;
|
||||
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
|
||||
taosCacheEmpty(tscCacheHandle);
|
||||
pRes->code = TSDB_CODE_SUCCESS;
|
||||
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
|
||||
tscProcessServerVer(pSql);
|
||||
pRes->code = tscProcessServerVer(pSql);
|
||||
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {
|
||||
tscProcessClientVer(pSql);
|
||||
pRes->code = tscProcessClientVer(pSql);
|
||||
} else if (pCmd->command == TSDB_SQL_CURRENT_USER) {
|
||||
tscProcessCurrentUser(pSql);
|
||||
pRes->code = tscProcessCurrentUser(pSql);
|
||||
} else if (pCmd->command == TSDB_SQL_CURRENT_DB) {
|
||||
tscProcessCurrentDB(pSql);
|
||||
pRes->code = tscProcessCurrentDB(pSql);
|
||||
} else if (pCmd->command == TSDB_SQL_SERV_STATUS) {
|
||||
tscProcessServStatus(pSql);
|
||||
pRes->code = tscProcessServStatus(pSql);
|
||||
} else {
|
||||
pSql->res.code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
tscError("%p not support command:%d", pSql, pCmd->command);
|
||||
}
|
||||
|
||||
// keep the code in local variable in order to avoid invalid read in case of async query
|
||||
int32_t code = pSql->res.code;
|
||||
int32_t code = pRes->code;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
(*pSql->fp)(pSql->param, pSql, code);
|
||||
} else {
|
||||
|
|
|
@ -67,8 +67,7 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
|
|||
SQLFunctionCtx *pCtx = &pReducer->pCtx[i];
|
||||
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
|
||||
pCtx->aOutputBuf =
|
||||
pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
|
||||
pCtx->aOutputBuf = pReducer->pResultBuf->data + pExpr->offset * pReducer->resColModel->capacity;
|
||||
pCtx->order = pQueryInfo->order.order;
|
||||
pCtx->functionId = pExpr->functionId;
|
||||
|
||||
|
@ -160,7 +159,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
|
||||
if (pMemBuffer == NULL) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
|
||||
|
||||
tscError("%p pMemBuffer is NULL", pMemBuffer);
|
||||
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
||||
return;
|
||||
|
@ -168,7 +166,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
|
||||
if (pDesc->pColumnModel == NULL) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
|
||||
|
||||
tscError("%p no local buffer or intermediate result format model", pSql);
|
||||
pRes->code = TSDB_CODE_TSC_APP_ERROR;
|
||||
return;
|
||||
|
@ -188,7 +185,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
if (numOfFlush == 0 || numOfBuffer == 0) {
|
||||
tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
|
||||
tscDebug("%p retrieved no data", pSql);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -279,6 +275,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
taosTFree(pReducer);
|
||||
return;
|
||||
}
|
||||
|
||||
param->pLocalData = pReducer->pLocalDataSrc;
|
||||
param->pDesc = pReducer->pDesc;
|
||||
param->num = pReducer->pLocalDataSrc[0]->pMemBuffer->numOfElemsPerPage;
|
||||
|
|
|
@ -180,7 +180,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
|
|||
} else if (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0) {
|
||||
*(uint8_t *)payload = TSDB_DATA_BOOL_NULL;
|
||||
} else {
|
||||
return tscInvalidSQLErrMsg(msg, "invalid bool data", pToken->z);
|
||||
return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z);
|
||||
}
|
||||
} else if (pToken->type == TK_INTEGER) {
|
||||
iv = strtoll(pToken->z, NULL, 10);
|
||||
|
@ -439,8 +439,8 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
|||
int16_t type = sToken.type;
|
||||
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
|
||||
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
|
||||
tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
|
||||
*code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
tscSQLSyntaxErrMsg(error, "invalid data or symbol", sToken.z);
|
||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -472,7 +472,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
|
|||
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
*code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
return -1; // NOTE: here 0 mean error!
|
||||
}
|
||||
|
||||
|
@ -568,8 +568,8 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
|
|||
sToken = tStrGetToken(*str, &index, false, 0, NULL);
|
||||
*str += index;
|
||||
if (sToken.n == 0 || sToken.type != TK_RP) {
|
||||
tscInvalidSQLErrMsg(error, ") expected", *str);
|
||||
*code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
tscSQLSyntaxErrMsg(error, ") expected", *str);
|
||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -578,7 +578,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
|
|||
|
||||
if (numOfRows <= 0) {
|
||||
strcpy(error, "no any data points");
|
||||
*code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
return -1;
|
||||
} else {
|
||||
return numOfRows;
|
||||
|
@ -943,7 +943,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
|
|||
sToken = tStrGetToken(sql, &index, false, 0, NULL);
|
||||
sql += index;
|
||||
if (sToken.n == 0 || sToken.type != TK_RP) {
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
|
||||
}
|
||||
|
||||
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
|
||||
|
|
|
@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
|
|||
pSdesc->num = htobe64(pStream->num);
|
||||
|
||||
pSdesc->useconds = htobe64(pStream->useconds);
|
||||
pSdesc->stime = htobe64(pStream->stime - pStream->interval);
|
||||
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
|
||||
pSdesc->ctime = htobe64(pStream->ctime);
|
||||
|
||||
pSdesc->slidingTime = htobe64(pStream->slidingTime);
|
||||
pSdesc->interval = htobe64(pStream->interval);
|
||||
pSdesc->interval = htobe64(pStream->intervalTime);
|
||||
|
||||
pHeartbeat->numOfStreams++;
|
||||
pSdesc++;
|
||||
|
|
|
@ -33,6 +33,8 @@
|
|||
|
||||
#define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0"
|
||||
|
||||
#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey))
|
||||
|
||||
// -1 is tbname column index, so here use the -3 as the initial value
|
||||
#define COLUMN_INDEX_INITIAL_VAL (-3)
|
||||
#define COLUMN_INDEX_INITIALIZER \
|
||||
|
@ -45,6 +47,10 @@ typedef struct SColumnList { // todo refactor
|
|||
SColumnIndex ids[TSDB_MAX_COLUMNS];
|
||||
} SColumnList;
|
||||
|
||||
typedef struct SConvertFunc {
|
||||
int32_t originFuncId;
|
||||
int32_t execFuncId;
|
||||
} SConvertFunc;
|
||||
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex);
|
||||
|
||||
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
|
||||
|
@ -184,7 +190,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (!pInfo->valid) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), pInfo->pzErrMsg);
|
||||
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg);
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
|
||||
|
@ -587,22 +593,21 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
|
|||
|
||||
// interval is not null
|
||||
SStrToken* t = &pQuerySql->interval;
|
||||
if (getTimestampInUsFromStr(t->z, t->n, &pQueryInfo->intervalTime) != TSDB_CODE_SUCCESS) {
|
||||
if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
|
||||
// if the unit of time window value is millisecond, change the value from microsecond
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
|
||||
}
|
||||
|
||||
/* parser has filter the illegal type, no need to check here */
|
||||
pQueryInfo->slidingTimeUnit = pQuerySql->interval.z[pQuerySql->interval.n - 1];
|
||||
|
||||
// interval cannot be less than 10 milliseconds
|
||||
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
}
|
||||
|
||||
// for top/bottom + interval query, we do not add additional timestamp column in the front
|
||||
if (isTopBottomQuery(pQueryInfo)) {
|
||||
|
@ -666,6 +671,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
|||
const char* msg0 = "sliding value too small";
|
||||
const char* msg1 = "sliding value no larger than the interval value";
|
||||
const char* msg2 = "sliding value can not be less than 1% of interval value";
const char* msg3 = "does not support sliding when interval is natural month/year";
|
||||
|
||||
const static int32_t INTERVAL_SLIDING_FACTOR = 100;
|
||||
|
||||
|
@ -673,7 +679,16 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
|||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
SStrToken* pSliding = &pQuerySql->sliding;
|
||||
if (pSliding->n != 0) {
|
||||
if (pSliding->n == 0) {
|
||||
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
||||
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->slidingTime /= 1000;
|
||||
|
@ -686,9 +701,6 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
|
|||
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
}
|
||||
} else {
|
||||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
||||
}
|
||||
|
||||
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
|
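Taken together, the checks above mean: an omitted sliding value defaults to the interval, sliding is rejected for natural month/year intervals, and otherwise the sliding value may not exceed the interval nor be smaller than 1% of it (INTERVAL_SLIDING_FACTOR). A minimal standalone sketch of those rules follows; the helper name is hypothetical and not part of this change.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTERVAL_SLIDING_FACTOR 100  /* interval may be at most 100x the sliding value */

/* Illustrative only: returns true if an (interval, sliding) pair would pass the
 * validation sketched in parseSlidingClause above. Units are the query's native
 * time resolution; intervalUnit follows the 'n' (month) / 'y' (year) convention. */
static bool sliding_is_valid(int64_t intervalTime, char intervalUnit, int64_t slidingTime) {
  if (slidingTime == 0) return true;                                    /* defaults to the interval */
  if (intervalUnit == 'n' || intervalUnit == 'y') return false;         /* no sliding for natural month/year */
  if (slidingTime > intervalTime) return false;                         /* sliding must not exceed the interval */
  if (intervalTime / slidingTime > INTERVAL_SLIDING_FACTOR) return false; /* not below 1% of the interval */
  return true;
}

int main(void) {
  printf("%d\n", sliding_is_valid(10000, 'a', 500)); /* 1: accepted */
  printf("%d\n", sliding_is_valid(10000, 'a', 50));  /* 0: smaller than 1%% of the interval */
  printf("%d\n", sliding_is_valid(1, 'n', 10));      /* 0: natural month interval cannot slide */
  return 0;
}
```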
@ -1501,13 +1513,13 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
|
||||
static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, SConvertFunc cvtFunc, char* aliasName,
|
||||
int32_t resColIdx, SColumnIndex* pColIndex) {
|
||||
int16_t type = 0;
|
||||
int16_t bytes = 0;
|
||||
|
||||
char columnName[TSDB_COL_NAME_LEN] = {0};
|
||||
const char* msg1 = "not support column types";
|
||||
int32_t functionID = cvtFunc.execFuncId;
|
||||
|
||||
if (functionID == TSDB_FUNC_SPREAD) {
|
||||
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
|
||||
|
@ -1527,12 +1539,17 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
|
|||
if (aliasName != NULL) {
|
||||
tstrncpy(columnName, aliasName, sizeof(columnName));
|
||||
} else {
|
||||
getRevisedName(columnName, functionID, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
|
||||
getRevisedName(columnName, cvtFunc.originFuncId, sizeof(columnName) - 1, pSchema[pColIndex->columnIndex].name);
|
||||
}
|
||||
|
||||
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
|
||||
tstrncpy(pExpr->aliasName, columnName, sizeof(pExpr->aliasName));
|
||||
|
||||
if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
|
||||
pExpr->colInfo.flag |= TSDB_COL_NULL;
|
||||
}
|
||||
|
||||
// set reverse order scan data blocks for last query
|
||||
if (functionID == TSDB_FUNC_LAST) {
|
||||
pExpr->numOfParams = 1;
|
||||
|
@ -1766,7 +1783,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
|
||||
}
|
||||
|
||||
SConvertFunc cvtFunc = {.originFuncId = functionID, .execFuncId = functionID};
|
||||
if (functionID == TSDB_FUNC_LAST_ROW && TSWINDOW_IS_EQUAL(pQueryInfo->window,TSWINDOW_INITIALIZER)) {
|
||||
cvtFunc.execFuncId = TSDB_FUNC_LAST;
|
||||
}
|
||||
if (!requireAllFields) {
|
||||
if (pItem->pNode->pParam->nExpr < 1) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
|
@ -1798,7 +1818,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
|
||||
index.columnIndex = j;
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex++, &index) != 0) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
}
|
||||
|
@ -1815,8 +1835,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex + i, &index) != 0) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
|
@ -1853,7 +1872,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
|
||||
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex, &index) != 0) {
|
||||
if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, cvtFunc, pItem->aliasName, colIndex, &index) != 0) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
|
@ -4675,7 +4694,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
|||
const char* msg0 = "sample interval can not be less than 10ms.";
|
||||
const char* msg1 = "functions not allowed in select clause";
|
||||
|
||||
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
|
||||
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
|
||||
pQueryInfo->intervalTimeUnit != 'n' &&
|
||||
pQueryInfo->intervalTimeUnit != 'y') {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
||||
}
|
||||
|
||||
|
@ -5238,7 +5259,7 @@ static bool tagColumnInGroupby(SSqlGroupbyExpr* pGroupbyExpr, int16_t columnId)
|
|||
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
|
||||
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, j);
|
||||
|
||||
if (columnId == pColIndex->colId && pColIndex->flag == TSDB_COL_TAG) {
|
||||
if (columnId == pColIndex->colId && TSDB_COL_IS_TAG(pColIndex->flag )) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -5537,7 +5558,6 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
|
|||
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
|
||||
const char* msg1 = "only one expression allowed";
|
||||
const char* msg2 = "invalid expression in select clause";
|
||||
|
@ -5807,22 +5827,34 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
|
|||
|
||||
int32_t ret = TSDB_CODE_SUCCESS;
|
||||
for (int32_t i = 0; i < pList->nExpr; ++i) {
|
||||
SSchema* pSchema = pTagSchema + i;
|
||||
SSchema* pSchema = &pTagSchema[i];
|
||||
|
||||
char tagVal[TSDB_MAX_TAGS_LEN];
|
||||
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
// validate the length of binary
|
||||
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pSchema->bytes) {
|
||||
if (pList->a[i].pVar.nLen > pSchema->bytes) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
}
|
||||
|
||||
char tagVal[TSDB_MAX_TAGS_LEN];
|
||||
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true);
|
||||
|
||||
// check again after the convert since it may be converted from binary to nchar.
|
||||
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
int16_t len = varDataTLen(tagVal);
|
||||
if (len > pSchema->bytes) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
}
|
||||
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
|
||||
|
||||
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
|
||||
}
|
||||
|
||||
|
@ -6078,6 +6110,10 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
|||
}
|
||||
|
||||
int32_t joinQuery = (pQuerySql->from != NULL && pQuerySql->from->nExpr > 2);
|
||||
|
||||
if (pQuerySql->pWhere) {
|
||||
pQueryInfo->window = TSWINDOW_INITIALIZER;
|
||||
}
|
||||
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
@ -6161,7 +6197,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
|
|||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
|
||||
}
|
||||
|
||||
if (pQueryInfo->intervalTime > 0) {
|
||||
if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
|
||||
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
|
||||
// number of result is not greater than 10,000,000
|
||||
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
|
||||
|
|
|
@ -226,17 +226,13 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.handle = &pSql->pRpcCtx,
.code = 0
};

// NOTE: the rpc context should be acquired before sending data to server.
// Otherwise, the pSql object may have been released already during the response function, which is
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
// cause crash.
if (pObj != NULL && pObj->signature == pObj) {
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
return TSDB_CODE_SUCCESS;
} else {
//pObj->signature has been reset by other thread, ignore concurrency problem
return TSDB_CODE_TSC_CONN_KILLED;
}
}

void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
@ -673,6 +669,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
|
||||
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
|
||||
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
|
||||
pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
|
||||
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
|
||||
pQueryMsg->numOfTags = htonl(numOfTags);
|
||||
|
@ -1495,8 +1492,7 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
char *tmpData = NULL;
|
||||
uint32_t len = pSql->cmd.payloadLen;
|
||||
if (len > 0) {
|
||||
tmpData = calloc(1, len);
|
||||
if (NULL == tmpData) {
|
||||
if ((tmpData = calloc(1, len)) == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -1541,8 +1537,7 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
// copy payload content to temp buff
|
||||
char *tmpData = 0;
|
||||
if (pCmd->payloadLen > 0) {
|
||||
tmpData = calloc(1, pCmd->payloadLen + 1);
|
||||
if (NULL == tmpData) return -1;
|
||||
if ((tmpData = calloc(1, pCmd->payloadLen + 1)) == NULL) return -1;
|
||||
memcpy(tmpData, pCmd->payload, pCmd->payloadLen);
|
||||
}
|
||||
|
||||
|
|
|
@ -597,11 +597,12 @@ int taos_errno(TAOS_RES *tres) {
}

/*
* In case of invalid sql error, additional information is attached to explain
* In case of invalid sql/sql syntax error, additional information is attached to explain
* why the sql is invalid
*/
static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
if (code != TSDB_CODE_TSC_INVALID_SQL) {
if (code != TSDB_CODE_TSC_INVALID_SQL
&& code != TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
return false;
}

@ -610,8 +611,10 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
char *z = NULL;
if (len > 0) {
z = strstr(pCmd->payload, "invalid SQL");
if (z == NULL) {
z = strstr(pCmd->payload, "syntax error");
}
}

return z != NULL;
}
@ -46,24 +46,25 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) {
|
|||
return true;
|
||||
}
|
||||
|
||||
static int64_t tscGetRetryDelayTime(int64_t slidingTime, int16_t prec) {
|
||||
static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) {
|
||||
float retryRangeFactor = 0.3f;
|
||||
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
|
||||
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
|
||||
|
||||
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
|
||||
// change to ms
|
||||
if (prec == TSDB_TIME_PRECISION_MICRO) {
|
||||
slidingTime = slidingTime / 1000;
|
||||
}
|
||||
|
||||
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
|
||||
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
|
||||
|
||||
if (slidingTime < retryDelta) {
|
||||
return slidingTime;
|
||||
} else {
|
||||
return retryDelta;
|
||||
}
|
||||
}
|
||||
|
||||
return retryDelta;
|
||||
}
|
||||
|
||||
static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
|
||||
SSqlStream *pStream = (SSqlStream *)pMsg->ahandle;
|
||||
SSqlObj * pSql = pStream->pSql;
|
||||
|
@ -86,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
|
|||
|
||||
// failed to get meter/metric meta, retry in 10sec.
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
|
||||
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
|
||||
tscSetRetryTimer(pStream, pSql, retryDelayTime);
|
||||
|
||||
|
@ -131,13 +132,17 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
}
|
||||
if (etime > pStream->etime) {
|
||||
etime = pStream->etime;
|
||||
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
|
||||
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
|
||||
} else {
|
||||
etime = pStream->stime + (etime - pStream->stime) / pStream->interval * pStream->interval;
|
||||
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
|
||||
}
|
||||
pQueryInfo->window.ekey = etime;
|
||||
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
|
||||
int64_t timer = pStream->slidingTime;
|
||||
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
|
||||
timer = 86400 * 1000l;
|
||||
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
timer /= 1000l;
|
||||
}
|
||||
tscSetRetryTimer(pStream, pSql, timer);
|
||||
|
@ -157,7 +162,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
|
||||
SSqlStream *pStream = (SSqlStream *)param;
|
||||
if (tres == NULL || numOfRows < 0) {
|
||||
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
|
||||
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
|
||||
retryDelay);
|
||||
|
||||
|
@ -218,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
SSqlObj * pSql = (SSqlObj *)res;
|
||||
|
||||
if (pSql == NULL || numOfRows < 0) {
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
|
||||
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
|
||||
|
@ -241,8 +246,12 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
}
|
||||
|
||||
if (!pStream->isProject) {
|
||||
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
|
||||
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
|
||||
} else {
|
||||
pStream->stime += pStream->slidingTime;
|
||||
}
|
||||
}
|
||||
// actually only one row is returned. this following is not necessary
|
||||
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
|
||||
} else { // numOfRows == 0, all data has been retrieved
|
||||
|
@ -301,7 +310,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
|
|||
now + timer, timer, delay, pStream->stime, etime);
|
||||
} else {
|
||||
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
|
||||
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
|
||||
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
|
||||
}
|
||||
|
||||
pSql->cmd.command = TSDB_SQL_SELECT;
|
||||
|
@ -311,23 +320,26 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
|
|||
}
|
||||
|
||||
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
|
||||
int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
|
||||
|
||||
int64_t maxDelay =
|
||||
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
|
||||
|
||||
int64_t delayDelta = maxDelay;
|
||||
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
|
||||
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
|
||||
if (delayDelta > maxDelay) {
|
||||
delayDelta = maxDelay;
|
||||
}
|
||||
|
||||
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
|
||||
if (maxDelay > remainTimeWindow) {
|
||||
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
|
||||
}
|
||||
}
|
||||
|
||||
int64_t currentDelay = (rand() % maxDelay); // a random number
|
||||
currentDelay += delayDelta;
|
||||
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
|
||||
assert(currentDelay < pStream->slidingTime);
|
||||
}
|
||||
|
||||
return currentDelay;
|
||||
}
|
||||
|
@ -354,7 +366,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
} else {
|
||||
if ((pStream->stime - pStream->interval) >= pStream->etime) {
|
||||
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
|
||||
if (stime >= pStream->etime) {
|
||||
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
|
||||
pStream->stime, pStream->etime);
|
||||
// TODO : How to terminate stream here
|
||||
|
@ -387,24 +400,24 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
|
||||
if (pQueryInfo->intervalTime < minIntervalTime) {
|
||||
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
|
||||
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
|
||||
pQueryInfo->intervalTime, minIntervalTime);
|
||||
pQueryInfo->intervalTime = minIntervalTime;
|
||||
}
|
||||
|
||||
pStream->interval = pQueryInfo->intervalTime; // it shall be derived from sql string
|
||||
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
|
||||
|
||||
if (pQueryInfo->slidingTime == 0) {
|
||||
if (pQueryInfo->slidingTime <= 0) {
|
||||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
||||
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
}
|
||||
|
||||
int64_t minSlidingTime =
|
||||
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
|
||||
|
||||
if (pQueryInfo->slidingTime == -1) {
|
||||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
||||
} else if (pQueryInfo->slidingTime < minSlidingTime) {
|
||||
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
|
||||
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
|
||||
pQueryInfo->slidingTime, minSlidingTime);
|
||||
|
||||
|
@ -418,6 +431,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
|
||||
}
|
||||
|
||||
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
|
||||
pStream->slidingTime = pQueryInfo->slidingTime;
|
||||
|
||||
if (pStream->isProject) {
|
||||
|
@ -431,7 +445,7 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
|
|||
|
||||
if (pStream->isProject) {
|
||||
// no data in table, flush all data till now to destination meter, 10sec delay
|
||||
pStream->interval = tsProjectExecInterval;
|
||||
pStream->intervalTime = tsProjectExecInterval;
|
||||
pStream->slidingTime = tsProjectExecInterval;
|
||||
|
||||
if (stime != 0) { // first projection start from the latest event timestamp
|
||||
|
@ -442,11 +456,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
|
|||
}
|
||||
} else { // timewindow based aggregation stream
|
||||
if (stime == 0) { // no data in meter till now
|
||||
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
|
||||
stime -= pStream->interval;
|
||||
stime = pQueryInfo->window.skey;
|
||||
if (stime == INT64_MIN) {
|
||||
stime = (int64_t)taosGetTimestamp(pStream->precision);
|
||||
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
|
||||
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
|
||||
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
|
||||
}
|
||||
} else {
|
||||
int64_t newStime = (stime / pStream->interval) * pStream->interval;
|
||||
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
|
||||
if (newStime != stime) {
|
||||
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
|
||||
stime = newStime;
|
||||
|
@ -516,7 +534,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
|||
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
|
||||
|
||||
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
|
||||
pStream, pTableMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, pSql->sqlstr);
|
||||
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
|
||||
}
|
||||
|
||||
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
||||
|
|
|
@ -178,6 +178,8 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
|
|||
pSupporter->subqueryIndex = index;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
|
||||
pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
pSupporter->slidingTime = pQueryInfo->slidingTimeUnit;
|
||||
pSupporter->intervalTime = pQueryInfo->intervalTime;
|
||||
pSupporter->slidingTime = pQueryInfo->slidingTime;
|
||||
pSupporter->limit = pQueryInfo->limit;
|
||||
|
@ -309,6 +311,8 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
|
|||
// set the second stage sub query for join process
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
|
||||
|
||||
pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
|
||||
pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
|
||||
pQueryInfo->intervalTime = pSupporter->intervalTime;
|
||||
pQueryInfo->slidingTime = pSupporter->slidingTime;
|
||||
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
|
||||
|
@ -570,8 +574,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar
|
|||
|
||||
SSchema* pColSchema = tscGetTableColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId);
|
||||
|
||||
*s1 = taosArrayInit(p1->num, p1->tagSize);
|
||||
*s2 = taosArrayInit(p2->num, p2->tagSize);
|
||||
// int16_t for padding
|
||||
*s1 = taosArrayInit(p1->num, p1->tagSize - sizeof(int16_t));
|
||||
*s2 = taosArrayInit(p2->num, p2->tagSize - sizeof(int16_t));
|
||||
|
||||
if (!(checkForDuplicateTagVal(pQueryInfo, p1, pParentSql) && checkForDuplicateTagVal(pQueryInfo, p2, pParentSql))) {
|
||||
return TSDB_CODE_QRY_DUP_JOIN_KEY;
|
||||
|
@ -1039,6 +1044,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
|
|||
|
||||
int32_t numOfExprs = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
|
||||
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * numOfExprs);
|
||||
if (pRes->pColumnIndex == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
|
@ -1153,7 +1162,8 @@ static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code);
|
|||
|
||||
static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj);
|
||||
|
||||
int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
|
||||
// TODO
|
||||
int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter *pSupporter) {
|
||||
SSqlCmd * pCmd = &pSql->cmd;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
|
@ -1199,7 +1209,9 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
|
|||
|
||||
// this data needs to be transfer to support struct
|
||||
memset(&pNewQueryInfo->fieldsInfo, 0, sizeof(SFieldInfo));
|
||||
tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond);//pNewQueryInfo->tagCond;
|
||||
if (tscTagCondCopy(&pSupporter->tagCond, &pNewQueryInfo->tagCond) != 0) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pNew->cmd.numOfCols = 0;
|
||||
pNewQueryInfo->intervalTime = 0;
|
||||
|
@ -1296,52 +1308,75 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
|
|||
pNewQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY;
|
||||
}
|
||||
|
||||
return tscProcessSql(pNew);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
||||
void tscHandleMasterJoinQuery(SSqlObj* pSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
SSqlRes* pRes = &pSql->res;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
assert((pQueryInfo->type & TSDB_QUERY_TYPE_SUBQUERY) == 0);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
// todo add test
|
||||
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
|
||||
if (pState == NULL) {
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return pSql->res.code;
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pState->numOfTotal = pQueryInfo->numOfTables;
|
||||
pState->numOfRemain = pState->numOfTotal;
|
||||
|
||||
bool hasEmptySub = false;
|
||||
|
||||
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
|
||||
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
|
||||
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, pState, i);
|
||||
|
||||
if (pSupporter == NULL) { // failed to create support struct, abort current query
|
||||
tscError("%p tableIndex:%d, failed to allocate join support object, abort further query", pSql, i);
|
||||
pState->numOfRemain = i;
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
if (0 == i) {
|
||||
taosTFree(pState);
|
||||
}
|
||||
return pSql->res.code;
|
||||
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter);
|
||||
code = tscCreateJoinSubquery(pSql, i, pSupporter);
|
||||
if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query
|
||||
tscDestroyJoinSupporter(pSupporter);
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
if (0 == i) {
|
||||
taosTFree(pState);
|
||||
goto _error;
|
||||
}
|
||||
|
||||
SSqlObj* pSub = pSql->pSubs[i];
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSub->cmd, 0, 0);
|
||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->vgroupList->numOfVgroups == 0)) {
|
||||
hasEmptySub = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pSql->cmd.command = (pSql->numOfSubs <= 0)? TSDB_SQL_RETRIEVE_EMPTY_RESULT:TSDB_SQL_TABLE_JOIN_RETRIEVE;
|
||||
if (hasEmptySub) { // at least one subquery is empty, do nothing and return
|
||||
freeJoinSubqueryObj(pSql);
|
||||
pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
|
||||
(*pSql->fp)(pSql->param, pSql, 0);
|
||||
} else {
|
||||
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
pState->numOfRemain = i - 1; // the requests already sent will continue and do not go to the error process routine
break;
}
}
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
_error:
|
||||
pRes->code = code;
|
||||
tscQueueAsyncRes(pSql);
|
||||
}
|
||||
|
||||
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) {
|
||||
|
@ -1398,6 +1433,17 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
|
|||
|
||||
tscDebug("%p retrieved query data from %d vnode(s)", pSql, pSql->numOfSubs);
|
||||
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
|
||||
|
||||
if (pSql->pSubs == NULL || pState == NULL) {
|
||||
taosTFree(pState);
|
||||
taosTFree(pSql->pSubs);
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pSql->numOfSubs);
|
||||
|
||||
tscQueueAsyncRes(pSql);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pState->numOfTotal = pSql->numOfSubs;
|
||||
pState->numOfRemain = pSql->numOfSubs;
|
||||
|
||||
|
@ -2029,8 +2075,21 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) {
|
|||
numOfRes = (int32_t)(MIN(numOfRes, pSql->pSubs[i]->res.numOfRows));
|
||||
}
|
||||
|
||||
if (numOfRes == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t totalSize = tscGetResRowLength(pQueryInfo->exprList);
|
||||
pRes->pRsp = realloc(pRes->pRsp, numOfRes * totalSize);
|
||||
|
||||
assert(numOfRes * totalSize > 0);
|
||||
char* tmp = realloc(pRes->pRsp, numOfRes * totalSize);
|
||||
if (tmp == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return;
|
||||
} else {
|
||||
pRes->pRsp = tmp;
|
||||
}
|
||||
|
||||
pRes->data = pRes->pRsp;
|
||||
|
||||
char* data = pRes->data;
|
||||
|
@ -2069,6 +2128,12 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
|
|||
pRes->buffer = calloc(numOfExprs, POINTER_BYTES);
|
||||
pRes->length = calloc(numOfExprs, sizeof(int32_t));
|
||||
|
||||
if (pRes->tsrow == NULL || pRes->buffer == NULL || pRes->length == NULL) {
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscQueueAsyncRes(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
tscRestoreSQLFuncForSTableQuery(pQueryInfo);
|
||||
}
|
||||
|
||||
|
|
|
@ -254,15 +254,12 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
|
|||
pRes->numOfCols = numOfOutput;
|
||||
|
||||
pRes->tsrow = calloc(numOfOutput, POINTER_BYTES);
|
||||
pRes->length = calloc(numOfOutput, sizeof(int32_t)); // todo refactor
|
||||
pRes->length = calloc(numOfOutput, sizeof(int32_t));
|
||||
pRes->buffer = calloc(numOfOutput, POINTER_BYTES);
|
||||
|
||||
// not enough memory
|
||||
if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
|
||||
taosTFree(pRes->tsrow);
|
||||
taosTFree(pRes->buffer);
|
||||
taosTFree(pRes->length);
|
||||
|
||||
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
return pRes->code;
|
||||
}
|
||||
|
@ -281,12 +278,13 @@ void tscDestroyResPointerInfo(SSqlRes* pRes) {
|
|||
}
|
||||
|
||||
taosTFree(pRes->pRsp);
|
||||
|
||||
taosTFree(pRes->tsrow);
|
||||
taosTFree(pRes->length);
|
||||
taosTFree(pRes->buffer);
|
||||
|
||||
taosTFree(pRes->pGroupRec);
|
||||
taosTFree(pRes->pColumnIndex);
|
||||
taosTFree(pRes->buffer);
|
||||
|
||||
if (pRes->pArithSup != NULL) {
|
||||
taosTFree(pRes->pArithSup->data);
|
||||
|
@ -1052,7 +1050,7 @@ void tscSqlExprInfoDestroy(SArray* pExprInfo) {
|
|||
taosArrayDestroy(pExprInfo);
|
||||
}
|
||||
|
||||
void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
|
||||
int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) {
|
||||
assert(src != NULL && dst != NULL);
|
||||
|
||||
size_t size = taosArrayGetSize(src);
|
||||
|
@ -1064,7 +1062,7 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
|
|||
if (deepcopy) {
|
||||
SSqlExpr* p1 = calloc(1, sizeof(SSqlExpr));
|
||||
if (p1 == NULL) {
|
||||
assert(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
*p1 = *pExpr;
|
||||
|
@ -1078,6 +1076,8 @@ void tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
|
||||
|
@ -1324,11 +1324,14 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
|
|||
return false;
|
||||
}
|
||||
|
||||
void tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
||||
int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
||||
memset(dest, 0, sizeof(STagCond));
|
||||
|
||||
if (src->tbnameCond.cond != NULL) {
|
||||
dest->tbnameCond.cond = strdup(src->tbnameCond.cond);
|
||||
if (dest->tbnameCond.cond == NULL) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
dest->tbnameCond.uid = src->tbnameCond.uid;
|
||||
|
@ -1337,7 +1340,7 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
|||
dest->relType = src->relType;
|
||||
|
||||
if (src->pCond == NULL) {
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t s = taosArrayGetSize(src->pCond);
|
||||
|
@ -1354,7 +1357,7 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
|||
assert(pCond->cond != NULL);
|
||||
c.cond = malloc(c.len);
|
||||
if (c.cond == NULL) {
|
||||
assert(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(c.cond, pCond->cond, c.len);
|
||||
|
@ -1362,6 +1365,8 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
|
|||
|
||||
taosArrayPush(dest->pCond, &c);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tscTagCondRelease(STagCond* pTagCond) {
|
||||
|
@ -1830,6 +1835,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
|
||||
pNewQueryInfo->command = pQueryInfo->command;
|
||||
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
|
||||
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
|
||||
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
|
||||
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
|
||||
|
@ -1854,7 +1860,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
}
|
||||
}
|
||||
|
||||
tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond);
|
||||
if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t));
|
||||
|
@ -1883,7 +1892,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
|
|||
}
|
||||
|
||||
uint64_t uid = pTableMetaInfo->pTableMeta->id.uid;
|
||||
tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true);
|
||||
if (tscSqlExprCopy(pNewQueryInfo->exprList, pQueryInfo->exprList, uid, true) != 0) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid);
|
||||
|
||||
|
@ -2028,10 +2040,37 @@ bool tscIsUpdateQuery(SSqlObj* pSql) {
|
|||
return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command);
|
||||
}
|
||||
|
||||
int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* sql) {
|
||||
const char* msgFormat1 = "syntax error near \'%s\'";
|
||||
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
|
||||
const char* msgFormat3 = "%s";
|
||||
|
||||
const char* prefix = "syntax error";
|
||||
const int32_t BACKWARD_CHAR_STEP = 0;
|
||||
|
||||
if (sql == NULL) {
|
||||
assert(additionalInfo != NULL);
|
||||
sprintf(msg, msgFormat1, additionalInfo);
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
char buf[64] = {0}; // only extract part of sql string
|
||||
strncpy(buf, (sql - BACKWARD_CHAR_STEP), tListLen(buf) - 1);
|
||||
|
||||
if (additionalInfo != NULL) {
|
||||
sprintf(msg, msgFormat2, buf, additionalInfo);
|
||||
} else {
|
||||
const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1;
|
||||
sprintf(msg, msgFormat, buf);
|
||||
}
|
||||
|
||||
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
|
||||
}
|
||||
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
|
||||
const char* msgFormat1 = "invalid SQL: %s";
|
||||
const char* msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)";
|
||||
const char* msgFormat3 = "invalid SQL: syntax error near \"%s\"";
|
||||
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
|
||||
const char* msgFormat3 = "invalid SQL: \'%s\'";
|
||||
|
||||
const int32_t BACKWARD_CHAR_STEP = 0;
|
||||
|
||||
|
|
|
@ -35,6 +35,8 @@ bool tscValidateTableNameLength(size_t len);
|
|||
|
||||
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
|
||||
|
||||
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
|
||||
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
|
||||
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
|
||||
|
||||
#endif // TDENGINE_NAME_H
|
||||
|
|
|
@ -100,13 +100,101 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
|
|||
return pFilter;
|
||||
}
|
||||
|
||||
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
|
||||
key /= 1000;
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
key /= 1000;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
time_t t = (time_t)key;
|
||||
localtime_r(&t, &tm);
|
||||
|
||||
if (timeUnit == 'y') {
|
||||
intervalTime *= 12;
|
||||
}
|
||||
|
||||
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
|
||||
tm.tm_year = mon / 12;
|
||||
tm.tm_mon = mon % 12;
|
||||
|
||||
key = mktime(&tm) * 1000L;
|
||||
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
key *= 1000L;
|
||||
}
|
||||
|
||||
return key;
|
||||
}
|
||||
|
||||
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
|
||||
skey /= 1000;
|
||||
ekey /= 1000;
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
skey /= 1000;
|
||||
ekey /= 1000;
|
||||
}
|
||||
if (ekey < skey) {
|
||||
int64_t tmp = ekey;
|
||||
ekey = skey;
|
||||
skey = tmp;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
time_t t = (time_t)skey;
|
||||
localtime_r(&t, &tm);
|
||||
int smon = tm.tm_year * 12 + tm.tm_mon;
|
||||
|
||||
t = (time_t)ekey;
|
||||
localtime_r(&t, &tm);
|
||||
int emon = tm.tm_year * 12 + tm.tm_mon;
|
||||
|
||||
if (timeUnit == 'y') {
|
||||
intervalTime *= 12;
|
||||
}
|
||||
|
||||
return (emon - smon) / (int32_t)intervalTime;
|
||||
}
|
||||
|
||||
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
|
||||
if (slidingTime == 0) {
|
||||
return startTime;
|
||||
}
|
||||
int64_t start = startTime;
|
||||
if (timeUnit == 'n' || timeUnit == 'y') {
|
||||
start /= 1000;
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
start /= 1000;
|
||||
}
|
||||
struct tm tm;
|
||||
time_t t = (time_t)start;
|
||||
localtime_r(&t, &tm);
|
||||
tm.tm_sec = 0;
|
||||
tm.tm_min = 0;
|
||||
tm.tm_hour = 0;
|
||||
tm.tm_mday = 1;
|
||||
|
||||
int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
|
||||
if (!(timeUnit == 'u' || timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
|
||||
if (timeUnit == 'y') {
|
||||
tm.tm_mon = 0;
|
||||
tm.tm_year = (int)(tm.tm_year / slidingTime * slidingTime);
|
||||
} else {
|
||||
int mon = tm.tm_year * 12 + tm.tm_mon;
|
||||
mon = (int)(mon / slidingTime * slidingTime);
|
||||
tm.tm_year = mon / 12;
|
||||
tm.tm_mon = mon % 12;
|
||||
}
|
||||
|
||||
start = mktime(&tm) * 1000L;
|
||||
if (precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
start *= 1000L;
|
||||
}
|
||||
} else {
|
||||
int64_t delta = startTime - intervalTime;
|
||||
int32_t factor = delta > 0? 1:-1;
|
||||
|
||||
start = (delta / slidingTime + factor) * slidingTime;
|
||||
|
||||
if (timeUnit == 'd' || timeUnit == 'w') {
|
||||
/*
|
||||
* here we revised the start time of day according to the local time zone,
|
||||
* but in case of DST, the start time of one day need to be dynamically decided.
|
||||
|
@ -127,6 +215,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
|
|||
if (end < startTime) {
|
||||
start += slidingTime;
|
||||
}
|
||||
}
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
|
|
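The 'n'/'y' branch added to taosGetIntervalStartTimestamp above works in calendar space rather than in raw milliseconds: it truncates the timestamp to the first day of the month and snaps the month counter (tm_year * 12 + tm_mon) down to a multiple of the sliding value. A rough standalone sketch of that calculation, assuming millisecond precision and local time; the helper name is hypothetical and not part of this commit.

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative only: floor `tsMs` (milliseconds, local time) to the start of a window
 * that is `slidingMonths` months wide, mirroring the month arithmetic used above. */
static int64_t month_window_start(int64_t tsMs, int64_t slidingMonths) {
  time_t t = (time_t)(tsMs / 1000);
  struct tm tm;
  localtime_r(&t, &tm);
  tm.tm_sec  = 0;
  tm.tm_min  = 0;
  tm.tm_hour = 0;
  tm.tm_mday = 1;                                        /* start of the month */
  int mon = tm.tm_year * 12 + tm.tm_mon;                 /* months since 1900 */
  mon = (int)(mon / slidingMonths * slidingMonths);      /* snap to the window boundary */
  tm.tm_year = mon / 12;
  tm.tm_mon  = mon % 12;
  return (int64_t)mktime(&tm) * 1000L;                   /* back to milliseconds */
}

int main(void) {
  int64_t now = (int64_t)time(NULL) * 1000L;
  printf("3-month window start: %lld\n", (long long)month_window_start(now, 3));
  return 0;
}
```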
|
@ -1,13 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: taos
|
||||
Version: 2.0.0
|
||||
Summary: TDengine python client package
|
||||
Home-page: https://github.com/pypa/sampleproject
|
||||
Author: Taosdata Inc.
|
||||
Author-email: support@taosdata.com
|
||||
License: UNKNOWN
|
||||
Description: # TDengine python client interface
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Operating System :: Linux
|
||||
Description-Content-Type: text/markdown
|
|
@ -1,13 +0,0 @@
|
|||
README.md
|
||||
setup.py
|
||||
taos/__init__.py
|
||||
taos/cinterface.py
|
||||
taos/connection.py
|
||||
taos/constants.py
|
||||
taos/cursor.py
|
||||
taos/dbapi.py
|
||||
taos/error.py
|
||||
taos.egg-info/PKG-INFO
|
||||
taos.egg-info/SOURCES.txt
|
||||
taos.egg-info/dependency_links.txt
|
||||
taos.egg-info/top_level.txt
|
|
@ -1 +0,0 @@
|
|||
taos
|
|
@ -1,13 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: taos
|
||||
Version: 2.0.0
|
||||
Summary: TDengine python client package
|
||||
Home-page: https://github.com/pypa/sampleproject
|
||||
Author: Taosdata Inc.
|
||||
Author-email: support@taosdata.com
|
||||
License: UNKNOWN
|
||||
Description: # TDengine python client interface
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Operating System :: Linux
|
||||
Description-Content-Type: text/markdown
|
|
@ -1,13 +0,0 @@
|
|||
README.md
|
||||
setup.py
|
||||
taos/__init__.py
|
||||
taos/cinterface.py
|
||||
taos/connection.py
|
||||
taos/constants.py
|
||||
taos/cursor.py
|
||||
taos/dbapi.py
|
||||
taos/error.py
|
||||
taos.egg-info/PKG-INFO
|
||||
taos.egg-info/SOURCES.txt
|
||||
taos.egg-info/dependency_links.txt
|
||||
taos.egg-info/top_level.txt
|
|
@ -1 +0,0 @@
|
|||
taos
|
|
@ -1,21 +0,0 @@
|
|||
from taos.cinterface import CTaosInterface
|
||||
from taos.error import *
|
||||
from taos.subscription import TDengineSubscription
|
||||
from taos.connection import TDengineConnection
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
conn = TDengineConnection(
|
||||
host="127.0.0.1", user="root", password="taosdata", database="test")
|
||||
|
||||
# Generate a cursor object to run SQL commands
|
||||
sub = conn.subscribe(False, "test", "select * from log0601;", 1000)
|
||||
|
||||
for i in range(100):
|
||||
print(i)
|
||||
data = sub.consume()
|
||||
for d in data:
|
||||
print(d)
|
||||
|
||||
sub.close()
|
||||
conn.close()
|
|
@ -1,13 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: taos
|
||||
Version: 2.0.0
|
||||
Summary: TDengine python client package
|
||||
Home-page: https://github.com/pypa/sampleproject
|
||||
Author: Taosdata Inc.
|
||||
Author-email: support@taosdata.com
|
||||
License: UNKNOWN
|
||||
Description: # TDengine python client interface
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Operating System :: Windows
|
||||
Description-Content-Type: text/markdown
|
|
@ -1,13 +0,0 @@
|
|||
README.md
|
||||
setup.py
|
||||
taos/__init__.py
|
||||
taos/cinterface.py
|
||||
taos/connection.py
|
||||
taos/constants.py
|
||||
taos/cursor.py
|
||||
taos/dbapi.py
|
||||
taos/error.py
|
||||
taos.egg-info/PKG-INFO
|
||||
taos.egg-info/SOURCES.txt
|
||||
taos.egg-info/dependency_links.txt
|
||||
taos.egg-info/top_level.txt
|
|
@ -1 +0,0 @@
|
|||
taos
|
|
@ -1,13 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: taos
|
||||
Version: 2.0.0
|
||||
Summary: TDengine python client package
|
||||
Home-page: https://github.com/pypa/sampleproject
|
||||
Author: Hongze Cheng
|
||||
Author-email: hzcheng@taosdata.com
|
||||
License: UNKNOWN
|
||||
Description: # TDengine python client interface
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Operating System :: Windows
|
||||
Description-Content-Type: text/markdown
|
|
@ -1,13 +0,0 @@
|
|||
README.md
|
||||
setup.py
|
||||
taos/__init__.py
|
||||
taos/cinterface.py
|
||||
taos/connection.py
|
||||
taos/constants.py
|
||||
taos/cursor.py
|
||||
taos/dbapi.py
|
||||
taos/error.py
|
||||
taos.egg-info/PKG-INFO
|
||||
taos.egg-info/SOURCES.txt
|
||||
taos.egg-info/dependency_links.txt
|
||||
taos.egg-info/top_level.txt
|
|
@ -1 +0,0 @@
|
|||
taos
|
|
@ -85,7 +85,11 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_DATA_NULL_STR_L "null"

#define TSDB_DEFAULT_USER "root"
#ifdef _TD_POWER_
#define TSDB_DEFAULT_PASS "powerdb"
#else
#define TSDB_DEFAULT_PASS "taosdata"
#endif

#define TSDB_TRUE 1
#define TSDB_FALSE 0
@ -98,6 +98,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_ACTION_IN_PROGRESS, 0, 0x0212, "Action in
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DISCONNECTED, 0, 0x0213, "Disconnected from service")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_WRITE_AUTH, 0, 0x0214, "No write permission")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, 0, 0x0215, "Connection killed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, 0, 0x0216, "Syntax error in SQL")

// mnode
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, 0, 0x0300, "Message not processed")
@ -170,6 +170,13 @@ enum _mgmt_table {
#define TSDB_COL_NORMAL 0x0u // the normal column of the table
#define TSDB_COL_TAG 0x1u // the tag column type
#define TSDB_COL_UDC 0x2u // the user specified normal string column, it is a dummy column
#define TSDB_COL_NULL 0x4u // the column filter NULL or not

#define TSDB_COL_IS_TAG(f) (((f&(~(TSDB_COL_NULL)))&TSDB_COL_TAG) != 0)
#define TSDB_COL_IS_NORMAL_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_NORMAL)
#define TSDB_COL_IS_UD_COL(f) ((f&(~(TSDB_COL_NULL))) == TSDB_COL_UDC)
#define TSDB_COL_REQ_NULL(f) (((f)&TSDB_COL_NULL) != 0)

extern char *taosMsg[];

@ -456,6 +463,7 @@ typedef struct {
int64_t intervalTime; // time interval for aggregation, in milliseconds
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
@ -3,5 +3,3 @@ PROJECT(TDengine)
|
|||
|
||||
ADD_SUBDIRECTORY(shell)
|
||||
ADD_SUBDIRECTORY(taosdemo)
|
||||
#ADD_SUBDIRECTORY(taosClusterTest)
|
||||
ADD_SUBDIRECTORY(taosnetwork)
|
||||
|
|
|
@ -24,7 +24,12 @@ ELSEIF (TD_WINDOWS)
|
|||
LIST(APPEND SRC ./src/shellWindows.c)
|
||||
ADD_EXECUTABLE(shell ${SRC})
|
||||
TARGET_LINK_LIBRARIES(shell taos_static)
|
||||
|
||||
IF (TD_POWER)
|
||||
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME power)
|
||||
ELSE ()
|
||||
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
|
||||
ENDIF ()
|
||||
ELSEIF (TD_DARWIN)
|
||||
LIST(APPEND SRC ./src/shellEngine.c)
|
||||
LIST(APPEND SRC ./src/shellMain.c)
|
||||
|
|
|
@ -50,6 +50,9 @@ typedef struct SShellArguments {
|
|||
char* commands;
|
||||
int abort;
|
||||
int port;
|
||||
int endPort;
|
||||
int pktLen;
|
||||
char* netTestRole;
|
||||
} SShellArguments;
|
||||
|
||||
/**************** Function declarations ****************/
|
||||
|
|
|
@ -30,11 +30,22 @@
|
|||
#include <regex.h>
|
||||
|
||||
/**************** Global variables ****************/
|
||||
#ifdef _TD_POWER_
|
||||
char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n"
|
||||
"Copyright (c) 2017 by PowerDB, Inc. All rights reserved.\n\n";
|
||||
char PROMPT_HEADER[] = "power> ";
|
||||
|
||||
char CONTINUE_PROMPT[] = " -> ";
|
||||
int prompt_size = 7;
|
||||
#else
|
||||
char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n"
|
||||
"Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n";
|
||||
char PROMPT_HEADER[] = "taos> ";
|
||||
|
||||
char CONTINUE_PROMPT[] = " -> ";
|
||||
int prompt_size = 6;
|
||||
#endif
|
||||
|
||||
TAOS_RES *result = NULL;
|
||||
SShellHistory history;
|
||||
|
||||
|
|
|
@ -46,6 +46,9 @@ static struct argp_option options[] = {
|
|||
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
|
||||
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
|
||||
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
|
||||
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."},
|
||||
{"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."},
|
||||
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
|
||||
{0}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
|
@ -65,6 +68,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
case 'P':
|
||||
if (arg) {
|
||||
tsDnodeShellPort = atoi(arg);
|
||||
arguments->port = atoi(arg);
|
||||
} else {
|
||||
fprintf(stderr, "Invalid port\n");
|
||||
return -1;
|
||||
|
@ -126,6 +130,29 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
|||
case 'd':
|
||||
arguments->database = arg;
|
||||
break;
|
||||
|
||||
case 'n':
|
||||
arguments->netTestRole = arg;
|
||||
break;
|
||||
|
||||
case 'e':
|
||||
if (arg) {
|
||||
arguments->endPort = atoi(arg);
|
||||
} else {
|
||||
fprintf(stderr, "Invalid end port\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
|
||||
case 'l':
|
||||
if (arg) {
|
||||
arguments->pktLen = atoi(arg);
|
||||
} else {
|
||||
fprintf(stderr, "Invalid packet length\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
|
||||
case OPT_ABORT:
|
||||
arguments->abort = 1;
|
||||
break;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include "os.h"
|
||||
#include "shell.h"
|
||||
#include "tnettest.h"
|
||||
|
||||
pthread_t pid;
|
||||
|
||||
|
@ -60,7 +61,10 @@ SShellArguments args = {
|
|||
.file = "\0",
|
||||
.dir = "\0",
|
||||
.threadNum = 5,
|
||||
.commands = NULL
|
||||
.commands = NULL,
|
||||
.endPort = 6042,
|
||||
.pktLen = 1000,
|
||||
.netTestRole = NULL
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -75,6 +79,11 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
shellParseArgument(argc, argv, &args);
|
||||
|
||||
if (args.netTestRole && args.netTestRole[0] != 0) {
|
||||
taosNetTest(args.host, (uint16_t)args.port, (uint16_t)args.endPort, args.pktLen, args.netTestRole);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* Initialize the shell */
|
||||
TAOS* con = shellInit(&args);
|
||||
if (con == NULL) {
|
||||
|
|
|
@ -89,7 +89,11 @@ typedef struct DemoArguments {
|
|||
{0, 'h', "host", 0, "The host to connect to TDengine. Default is localhost.", 0},
|
||||
{0, 'p', "port", 0, "The TCP/IP port number to use for the connection. Default is 0.", 1},
|
||||
{0, 'u', "user", 0, "The TDengine user name to use when connecting to the server. Default is 'root'.", 2},
|
||||
#ifdef _TD_POWER_
|
||||
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'powerdb'.", 3},
|
||||
#else
|
||||
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
|
||||
#endif
|
||||
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
|
||||
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
|
||||
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
|
||||
|
@ -104,7 +108,11 @@ typedef struct DemoArguments {
|
|||
{0, 'r', "num_of_records_per_req", 0, "The number of records per request. Default is 1000.", 10},
|
||||
{0, 't', "num_of_tables", 0, "The number of tables. Default is 10000.", 11},
|
||||
{0, 'n', "num_of_records_per_table", 0, "The number of records per table. Default is 100000.", 12},
|
||||
#ifdef _TD_POWER_
|
||||
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/power/'.", 14},
|
||||
#else
|
||||
{0, 'c', "config_directory", 0, "Configuration directory. Default is '/etc/taos/'.", 14},
|
||||
#endif
|
||||
{0, 'x', 0, 0, "Insert only flag.", 13},
|
||||
{0, 'O', "order", 0, "Insert mode--0: In order, 1: Out of order. Default is in order.", 14},
|
||||
{0, 'R', "rate", 0, "Out of order data's rate--if order=1 Default 10, min: 0, max: 50.", 14},
|
||||
|
@ -279,7 +287,11 @@ typedef struct DemoArguments {
|
|||
printf("%s%s\n", indent, "-u");
|
||||
printf("%s%s%s\n", indent, indent, "user, The user name to use when connecting to the server. Default is 'root'.");
|
||||
printf("%s%s\n", indent, "-p");
|
||||
#ifdef _TD_POWER_
|
||||
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'powerdb'.");
|
||||
#else
|
||||
printf("%s%s%s\n", indent, indent, "password, The password to use when connecting to the server. Default is 'taosdata'.");
|
||||
#endif
|
||||
printf("%s%s\n", indent, "-d");
|
||||
printf("%s%s%s\n", indent, indent, "database, Destination database. Default is 'test'.");
|
||||
printf("%s%s\n", indent, "-a");
|
||||
|
@ -309,7 +321,11 @@ typedef struct DemoArguments {
|
|||
printf("%s%s\n", indent, "-n");
|
||||
printf("%s%s%s\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 100000.");
|
||||
printf("%s%s\n", indent, "-c");
|
||||
#ifdef _TD_POWER_
|
||||
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/power/'.");
|
||||
#else
|
||||
printf("%s%s%s\n", indent, indent, "config_directory, Configuration directory. Default is '/etc/taos/'.");
|
||||
#endif
|
||||
printf("%s%s\n", indent, "-x");
|
||||
printf("%s%s%s\n", indent, indent, "flag, Insert only flag.");
|
||||
printf("%s%s\n", indent, "-O");
|
||||
|
@ -513,7 +529,11 @@ int main(int argc, char *argv[]) {
|
|||
SDemoArguments arguments = { NULL, // host
|
||||
0, // port
|
||||
"root", // user
|
||||
#ifdef _TD_POWER_
|
||||
"powerdb", // password
|
||||
#else
|
||||
"taosdata", // password
|
||||
#endif
|
||||
"test", // database
|
||||
1, // replica
|
||||
"t", // tb_prefix
|
||||
|
|
|
@ -1,10 +0,0 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)

IF (TD_LINUX)
  AUX_SOURCE_DIRECTORY(. SRC)
  ADD_EXECUTABLE(taosClient client.c)
  ADD_EXECUTABLE(taosServer server.c)
  TARGET_LINK_LIBRARIES( taosServer -lpthread -lm -lrt )
  TARGET_LINK_LIBRARIES( taosClient -lpthread -lm -lrt )
ENDIF ()

@ -1,313 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <argp.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <netdb.h>
|
||||
#include <netinet/in.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include <wordexp.h>
|
||||
|
||||
#define MAX_PKG_LEN (64*1000)
|
||||
#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
|
||||
#define TEST_FQDN_LEN 128
|
||||
#define TEST_IPv4ADDR_LEN 16
|
||||
|
||||
typedef struct {
|
||||
uint16_t port;
|
||||
uint32_t hostIp;
|
||||
char fqdn[TEST_FQDN_LEN];
|
||||
uint16_t pktLen;
|
||||
} info_s;
|
||||
|
||||
typedef struct Arguments {
|
||||
char host[TEST_IPv4ADDR_LEN];
|
||||
char fqdn[TEST_FQDN_LEN];
|
||||
uint16_t port;
|
||||
uint16_t max_port;
|
||||
uint16_t pktLen;
|
||||
} SArguments;
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{0, 'h', "host ip", 0, "The host ip to connect to TDEngine. Default is localhost.", 0},
|
||||
{0, 'p', "port", 0, "The TCP or UDP port number to use for the connection. Default is 6030.", 1},
|
||||
{0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6042.", 2},
|
||||
{0, 'f', "host fqdn", 0, "The host fqdn to connect to TDEngine.", 3},
|
||||
{0, 'l', "test pkg len", 0, "The len of pkg for test. Default is 1000 Bytes, max not greater than 64k Bytes.\nNotes: This parameter must be consistent between the client and the server.", 3}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
wordexp_t full_path;
|
||||
SArguments *arguments = state->input;
|
||||
switch (key) {
|
||||
case 'h':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
fprintf(stderr, "Invalid host ip %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
strcpy(arguments->host, full_path.we_wordv[0]);
|
||||
wordfree(&full_path);
|
||||
break;
|
||||
case 'p':
|
||||
arguments->port = atoi(arg);
|
||||
break;
|
||||
case 'm':
|
||||
arguments->max_port = atoi(arg);
|
||||
break;
|
||||
case 'l':
|
||||
arguments->pktLen = atoi(arg);
|
||||
break;
|
||||
case 'f':
|
||||
if (wordexp(arg, &full_path, 0) != 0) {
|
||||
fprintf(stderr, "Invalid host fqdn %s\n", arg);
|
||||
return -1;
|
||||
}
|
||||
strcpy(arguments->fqdn, full_path.we_wordv[0]);
|
||||
wordfree(&full_path);
|
||||
break;
|
||||
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp argp = {options, parse_opt, 0, 0};
|
||||
|
||||
int checkTcpPort(info_s *info) {
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
char sendbuf[BUFFER_SIZE];
|
||||
char recvbuf[BUFFER_SIZE];
|
||||
int iDataNum = 0;
|
||||
if ((clientSocket = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
|
||||
printf("socket() fail: %s\n", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
// set send and recv overtime
|
||||
struct timeval timeout;
|
||||
timeout.tv_sec = 2; //s
|
||||
timeout.tv_usec = 0; //us
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt send timer failed:");
|
||||
}
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt recv timer failed:");
|
||||
}
|
||||
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(info->port);
|
||||
|
||||
serverAddr.sin_addr.s_addr = info->hostIp;
|
||||
|
||||
//printf("=================================\n");
|
||||
if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) {
|
||||
printf("connect() fail: %s\t", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
//printf("Connect to: %s:%d...success\n", host, port);
|
||||
memset(sendbuf, 0, BUFFER_SIZE);
|
||||
memset(recvbuf, 0, BUFFER_SIZE);
|
||||
|
||||
struct in_addr ipStr;
|
||||
memcpy(&ipStr, &info->hostIp, 4);
|
||||
sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
|
||||
sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
|
||||
|
||||
send(clientSocket, sendbuf, info->pktLen, 0);
|
||||
|
||||
memset(recvbuf, 0, BUFFER_SIZE);
|
||||
int nleft, nread;
|
||||
char *ptr = recvbuf;
|
||||
nleft = info->pktLen;
|
||||
while (nleft > 0) {
|
||||
nread = recv(clientSocket, ptr, BUFFER_SIZE, 0);
|
||||
|
||||
if (nread == 0) {
|
||||
break;
|
||||
} else if (nread < 0) {
|
||||
if (errno == EINTR) {
|
||||
continue;
|
||||
} else {
|
||||
printf("recv ack pkg from TCP port: %d fail:%s.\n", info->port, strerror(errno));
|
||||
close(clientSocket);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
nleft -= nread;
|
||||
ptr += nread;
|
||||
iDataNum += nread;
|
||||
}
|
||||
}
|
||||
|
||||
if (iDataNum < info->pktLen) {
|
||||
printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, info->port);
|
||||
return -1;
|
||||
}
|
||||
//printf("Read ack pkg len:%d from tcp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
|
||||
|
||||
close(clientSocket);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int checkUdpPort(info_s *info) {
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
char sendbuf[BUFFER_SIZE];
|
||||
char recvbuf[BUFFER_SIZE];
|
||||
int iDataNum = 0;
|
||||
if ((clientSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
|
||||
perror("socket");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// set overtime
|
||||
struct timeval timeout;
|
||||
timeout.tv_sec = 2; //s
|
||||
timeout.tv_usec = 0; //us
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt send timer failed:");
|
||||
}
|
||||
if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) {
|
||||
perror("setsockopt recv timer failed:");
|
||||
}
|
||||
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(info->port);
|
||||
serverAddr.sin_addr.s_addr = info->hostIp;
|
||||
|
||||
memset(sendbuf, 0, BUFFER_SIZE);
|
||||
memset(recvbuf, 0, BUFFER_SIZE);
|
||||
|
||||
struct in_addr ipStr;
|
||||
memcpy(&ipStr, &info->hostIp, 4);
|
||||
sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", inet_ntoa(ipStr), info->port);
|
||||
sprintf(sendbuf + info->pktLen - 16, "1122334455667788");
|
||||
|
||||
socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr);
|
||||
|
||||
int code = sendto(clientSocket, sendbuf, info->pktLen, 0, (struct sockaddr *)&serverAddr, (int)sin_size);
|
||||
if (code < 0) {
|
||||
perror("sendto");
|
||||
return -1;
|
||||
}
|
||||
|
||||
iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size);
|
||||
|
||||
if (iDataNum < info->pktLen) {
|
||||
printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\t\t", iDataNum, info->pktLen, info->port);
|
||||
return -1;
|
||||
}
|
||||
|
||||
//printf("Read ack pkg len:%d from udp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8);
|
||||
close(clientSocket);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t getIpFromFqdn(const char *fqdn, uint32_t* ip) {
|
||||
struct addrinfo hints = {0};
|
||||
hints.ai_family = AF_UNSPEC;
|
||||
hints.ai_socktype = SOCK_STREAM;
|
||||
|
||||
struct addrinfo *result = NULL;
|
||||
|
||||
int32_t ret = getaddrinfo(fqdn, NULL, &hints, &result);
|
||||
if (result) {
|
||||
struct sockaddr *sa = result->ai_addr;
|
||||
struct sockaddr_in *si = (struct sockaddr_in*)sa;
|
||||
struct in_addr ia = si->sin_addr;
|
||||
*ip = ia.s_addr;
|
||||
freeaddrinfo(result);
|
||||
return 0;
|
||||
} else {
|
||||
printf("Failed get the ip address from fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uint16_t pktLen) {
|
||||
int ret;
|
||||
info_s info;
|
||||
memset(&info, 0, sizeof(info_s));
|
||||
info.hostIp = hostIp;
|
||||
info.pktLen = pktLen;
|
||||
|
||||
for (uint16_t port = startPort; port <= maxPort; port++) {
|
||||
//printf("test: %s:%d\n", info.host, port);
|
||||
printf("\n");
|
||||
|
||||
info.port = port;
|
||||
ret = checkTcpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("tcp port:%d test fail.\t\n", port);
|
||||
} else {
|
||||
printf("tcp port:%d test ok.\t\t", port);
|
||||
}
|
||||
|
||||
ret = checkUdpPort(&info);
|
||||
if (ret != 0) {
|
||||
printf("udp port:%d test fail.\t\n", port);
|
||||
} else {
|
||||
printf("udp port:%d test ok.\t\t", port);
|
||||
}
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
return ;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SArguments arguments = {"127.0.0.1", "", 6030, 6042, 1000};
|
||||
int ret;
|
||||
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
if (arguments.pktLen > MAX_PKG_LEN) {
|
||||
printf("test pkg len overflow: %d, max len not greater than %d bytes\n", arguments.pktLen, MAX_PKG_LEN);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
printf("host ip: %s\thost fqdn: %s\tport: %d\tmax_port: %d\tpkgLen: %d\n", arguments.host, arguments.fqdn, arguments.port, arguments.max_port, arguments.pktLen);
|
||||
|
||||
if (arguments.host[0] != 0) {
|
||||
printf("\nstart connect to %s test:\n", arguments.host);
|
||||
checkPort(inet_addr(arguments.host), arguments.port, arguments.max_port, arguments.pktLen);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
if (arguments.fqdn[0] != 0) {
|
||||
uint32_t hostIp = 0;
|
||||
ret = getIpFromFqdn(arguments.fqdn, &hostIp);
|
||||
if (ret) {
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
printf("\nstart connetc to %s test:\n", arguments.fqdn);
|
||||
checkPort(hostIp, arguments.port, arguments.max_port, arguments.pktLen);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -1,246 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <argp.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <netdb.h>
|
||||
#include <netinet/in.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#define MAX_PKG_LEN (64*1000)
|
||||
#define BUFFER_SIZE (MAX_PKG_LEN + 1024)
|
||||
|
||||
typedef struct {
|
||||
int port;
|
||||
uint16_t pktLen;
|
||||
} info_s;
|
||||
|
||||
typedef struct Arguments {
|
||||
char * host;
|
||||
uint16_t port;
|
||||
uint16_t max_port;
|
||||
uint16_t pktLen;
|
||||
} SArguments;
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{0, 'h', "host", 0, "The host to connect to TDEngine. Default is localhost.", 0},
|
||||
{0, 'p', "port", 0, "The TCP or UDP port number to use for the connection. Default is 6041.", 1},
|
||||
{0, 'm', "max port", 0, "The max TCP or UDP port number to use for the connection. Default is 6060.", 2},
|
||||
{0, 'l', "test pkg len", 0, "The len of pkg for test. Default is 1000 Bytes, max not greater than 64k Bytes.\nNotes: This parameter must be consistent between the client and the server.", 3}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
|
||||
SArguments *arguments = state->input;
|
||||
switch (key) {
|
||||
case 'h':
|
||||
arguments->host = arg;
|
||||
break;
|
||||
case 'p':
|
||||
arguments->port = atoi(arg);
|
||||
break;
|
||||
case 'm':
|
||||
arguments->max_port = atoi(arg);
|
||||
break;
|
||||
case 'l':
|
||||
arguments->pktLen = atoi(arg);
|
||||
break;
|
||||
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct argp argp = {options, parse_opt, 0, 0};
|
||||
|
||||
static void *bindTcpPort(void *sarg) {
|
||||
info_s *pinfo = (info_s *)sarg;
|
||||
int port = pinfo->port;
|
||||
int serverSocket;
|
||||
|
||||
struct sockaddr_in server_addr;
|
||||
struct sockaddr_in clientAddr;
|
||||
int addr_len = sizeof(clientAddr);
|
||||
int client;
|
||||
char buffer[BUFFER_SIZE];
|
||||
int iDataNum = 0;
|
||||
|
||||
if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
|
||||
printf("socket() fail: %s", strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bzero(&server_addr, sizeof(server_addr));
|
||||
server_addr.sin_family = AF_INET;
|
||||
server_addr.sin_port = htons(port);
|
||||
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
|
||||
if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
|
||||
printf("port:%d bind() fail: %s", port, strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (listen(serverSocket, 5) < 0) {
|
||||
printf("listen() fail: %s", strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
//printf("Bind port: %d success\n", port);
|
||||
while (1) {
|
||||
client = accept(serverSocket, (struct sockaddr *)&clientAddr, (socklen_t *)&addr_len);
|
||||
if (client < 0) {
|
||||
printf("accept() fail: %s", strerror(errno));
|
||||
continue;
|
||||
}
|
||||
|
||||
memset(buffer, 0, BUFFER_SIZE);
|
||||
int nleft, nread;
|
||||
char *ptr = buffer;
|
||||
nleft = pinfo->pktLen;
|
||||
while (nleft > 0) {
|
||||
nread = recv(client, ptr, BUFFER_SIZE, 0);
|
||||
|
||||
if (nread == 0) {
|
||||
break;
|
||||
} else if (nread < 0) {
|
||||
if (errno == EINTR) {
|
||||
continue;
|
||||
} else {
|
||||
printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", inet_ntoa(clientAddr.sin_addr), port, strerror(errno));
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
nleft -= nread;
|
||||
ptr += nread;
|
||||
iDataNum += nread;
|
||||
}
|
||||
}
|
||||
|
||||
printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
|
||||
if (iDataNum > 0) {
|
||||
send(client, buffer, iDataNum, 0);
|
||||
}
|
||||
}
|
||||
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *bindUdpPort(void *sarg) {
|
||||
info_s *pinfo = (info_s *)sarg;
|
||||
int port = pinfo->port;
|
||||
int serverSocket;
|
||||
|
||||
struct sockaddr_in server_addr;
|
||||
struct sockaddr_in clientAddr;
|
||||
char buffer[BUFFER_SIZE];
|
||||
int iDataNum;
|
||||
|
||||
if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
|
||||
perror("socket");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bzero(&server_addr, sizeof(server_addr));
|
||||
server_addr.sin_family = AF_INET;
|
||||
server_addr.sin_port = htons(port);
|
||||
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
|
||||
if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
|
||||
perror("connect");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
socklen_t sin_size;
|
||||
|
||||
while (1) {
|
||||
memset(buffer, 0, BUFFER_SIZE);
|
||||
|
||||
sin_size = sizeof(*(struct sockaddr *)&server_addr);
|
||||
|
||||
iDataNum = recvfrom(serverSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&clientAddr, &sin_size);
|
||||
|
||||
if (iDataNum < 0) {
|
||||
perror("recvfrom null");
|
||||
continue;
|
||||
}
|
||||
if (iDataNum > 0) {
|
||||
printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", inet_ntoa(clientAddr.sin_addr), port, iDataNum);
|
||||
//printf("Read msg from udp:%s ... %s\n", buffer, buffer+iDataNum-16);
|
||||
|
||||
sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size);
|
||||
}
|
||||
}
|
||||
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SArguments arguments = {"127.0.0.1", 6030, 6042, 1000};
|
||||
argp_parse(&argp, argc, argv, 0, 0, &arguments);
|
||||
if (arguments.pktLen > MAX_PKG_LEN) {
|
||||
printf("test pkg len overflow: %d, max len not greater than %d bytes\n", arguments.pktLen, MAX_PKG_LEN);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
int port = arguments.port;
|
||||
|
||||
int num = arguments.max_port - arguments.port + 1;
|
||||
|
||||
if (num < 0) {
|
||||
num = 1;
|
||||
}
|
||||
pthread_t *pids = malloc(2 * num * sizeof(pthread_t));
|
||||
info_s * tinfos = malloc(num * sizeof(info_s));
|
||||
info_s * uinfos = malloc(num * sizeof(info_s));
|
||||
|
||||
for (size_t i = 0; i < num; i++) {
|
||||
info_s *tcpInfo = tinfos + i;
|
||||
tcpInfo->port = port + i;
|
||||
tcpInfo->pktLen = arguments.pktLen;
|
||||
|
||||
if (pthread_create(pids + i, NULL, bindTcpPort, tcpInfo) != 0)
|
||||
{
|
||||
printf("create thread fail, port:%d.\n", port);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
info_s *udpInfo = uinfos + i;
|
||||
udpInfo->port = port + i;
|
||||
if (pthread_create(pids + num + i, NULL, bindUdpPort, udpInfo) != 0)
|
||||
{
|
||||
printf("create thread fail, port:%d.\n", port);
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < num; i++) {
|
||||
pthread_join(pids[i], NULL);
|
||||
pthread_join(pids[(num + i)], NULL);
|
||||
}
|
||||
}
|
|
@ -64,6 +64,7 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
|
|||
}
|
||||
|
||||
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
|
||||
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
|
||||
|
||||
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
|
||||
void deltaToUtcInitOnce();
|
||||
|
|
|
@ -319,6 +319,8 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
|
|||
*time = factor * seconds + fraction;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
|
||||
*result = val;
|
||||
|
||||
|
@ -384,6 +386,23 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
|
|||
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
|
||||
}
|
||||
|
||||
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
|
||||
errno = 0;
|
||||
|
||||
/* get the basic numeric value */
|
||||
*duration = strtoll(token, NULL, 10);
|
||||
if (errno != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
*unit = token[tokenLen - 1];
|
||||
if (*unit == 'n' || *unit == 'y') {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return getTimestampInUsFromStrImpl(*duration, *unit, duration);
|
||||
}
|
||||
|
||||
// internal function, when program is paused in debugger,
|
||||
// one can call this function from debugger to print a
|
||||
// timestamp as human readable string, for example (gdb):
|
||||
|
|
|
@ -18,15 +18,25 @@
|
|||
#include "tglobal.h"
|
||||
|
||||
void osInit() {
|
||||
|
||||
#ifdef _TD_POWER_
|
||||
if (configDir[0] == 0) {
|
||||
strcpy(configDir, "/etc/power");
|
||||
}
|
||||
strcpy(tsDataDir, "/var/lib/power");
|
||||
strcpy(tsLogDir, "/var/log/power");
|
||||
strcpy(tsScriptDir, "/etc/power");
|
||||
#else
|
||||
if (configDir[0] == 0) {
|
||||
strcpy(configDir, "/etc/taos");
|
||||
}
|
||||
strcpy(tsDataDir, "/var/lib/taos");
|
||||
strcpy(tsLogDir, "/var/log/taos");
|
||||
strcpy(tsScriptDir, "/etc/taos");
|
||||
#endif
|
||||
|
||||
strcpy(tsVnodeDir, "");
|
||||
strcpy(tsDnodeDir, "");
|
||||
strcpy(tsMnodeDir, "");
|
||||
strcpy(tsDataDir, "/var/lib/taos");
|
||||
strcpy(tsLogDir, "/var/log/taos");
|
||||
strcpy(tsScriptDir, "/etc/taos");
|
||||
strcpy(tsOsName, "Linux");
|
||||
}
|
|
@ -22,16 +22,29 @@ extern void taosWinSocketInit();
|
|||
|
||||
void osInit() {
|
||||
taosSetCoreDump();
|
||||
#ifdef _TD_POWER_
|
||||
if (configDir[0] == 0) {
|
||||
strcpy(configDir, "C:/PowerDB/cfg");
|
||||
}
|
||||
|
||||
strcpy(tsVnodeDir, "C:/PowerDB/data");
|
||||
strcpy(tsDataDir, "C:/PowerDB/data");
|
||||
strcpy(tsLogDir, "C:/PowerDB/log");
|
||||
strcpy(tsScriptDir, "C:/PowerDB/script");
|
||||
|
||||
#else
|
||||
if (configDir[0] == 0) {
|
||||
strcpy(configDir, "C:/TDengine/cfg");
|
||||
}
|
||||
|
||||
strcpy(tsVnodeDir, "C:/TDengine/data");
|
||||
strcpy(tsDnodeDir, "");
|
||||
strcpy(tsMnodeDir, "");
|
||||
strcpy(tsDataDir, "C:/TDengine/data");
|
||||
strcpy(tsLogDir, "C:/TDengine/log");
|
||||
strcpy(tsScriptDir, "C:/TDengine/script");
|
||||
#endif
|
||||
|
||||
strcpy(tsDnodeDir, "");
|
||||
strcpy(tsMnodeDir, "");
|
||||
strcpy(tsOsName, "Windows");
|
||||
taosWinSocketInit();
|
||||
}
|
|
@ -0,0 +1,33 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_HTTP_QUEUE_H
#define TDENGINE_HTTP_QUEUE_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

bool httpInitResultQueue();
void httpCleanupResultQueue();
void httpDispatchToResultQueue();

#ifdef __cplusplus
}
#endif

#endif

@ -0,0 +1,149 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "tqueue.h"
|
||||
#include "tnote.h"
|
||||
#include "taos.h"
|
||||
#include "tsclient.h"
|
||||
#include "httpInt.h"
|
||||
#include "httpContext.h"
|
||||
#include "httpSql.h"
|
||||
#include "httpResp.h"
|
||||
#include "httpAuth.h"
|
||||
#include "httpSession.h"
|
||||
|
||||
typedef struct {
|
||||
pthread_t thread;
|
||||
int32_t workerId;
|
||||
} SHttpWorker;
|
||||
|
||||
typedef struct {
|
||||
int32_t num;
|
||||
SHttpWorker *httpWorker;
|
||||
} SHttpWorkerPool;
|
||||
|
||||
typedef struct {
|
||||
void *param;
|
||||
void *result;
|
||||
int numOfRows;
|
||||
void (*fp)(void *param, void *result, int numOfRows);
|
||||
} SHttpResult;
|
||||
|
||||
static SHttpWorkerPool tsHttpPool;
|
||||
static taos_qset tsHttpQset;
|
||||
static taos_queue tsHttpQueue;
|
||||
|
||||
void httpDispatchToResultQueue(void *param, TAOS_RES *result, int numOfRows, void (*fp)(void *param, void *result, int numOfRows)) {
|
||||
if (tsHttpQueue != NULL) {
|
||||
SHttpResult *pMsg = (SHttpResult *)taosAllocateQitem(sizeof(SHttpResult));
|
||||
pMsg->param = param;
|
||||
pMsg->result = result;
|
||||
pMsg->numOfRows = numOfRows;
|
||||
pMsg->fp = fp;
|
||||
taosWriteQitem(tsHttpQueue, TAOS_QTYPE_RPC, pMsg);
|
||||
} else {
|
||||
(*fp)(param, result, numOfRows);
|
||||
}
|
||||
}
|
||||
|
||||
static void *httpProcessResultQueue(void *param) {
|
||||
SHttpResult *pMsg;
|
||||
int32_t type;
|
||||
void *unUsed;
|
||||
|
||||
while (1) {
|
||||
if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) {
|
||||
httpDebug("httpResultQueue: got no message from qset, exiting...");
|
||||
break;
|
||||
}
|
||||
|
||||
httpDebug("context:%p, res:%p will be processed in result queue", pMsg->param, pMsg->result);
|
||||
(*pMsg->fp)(pMsg->param, pMsg->result, pMsg->numOfRows);
|
||||
taosFreeQitem(pMsg);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool httpAllocateResultQueue() {
|
||||
tsHttpQueue = taosOpenQueue();
|
||||
if (tsHttpQueue == NULL) return false;
|
||||
|
||||
taosAddIntoQset(tsHttpQset, tsHttpQueue, NULL);
|
||||
|
||||
for (int32_t i = 0; i < tsHttpPool.num; ++i) {
|
||||
SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
|
||||
pWorker->workerId = i;
|
||||
|
||||
pthread_attr_t thAttr;
|
||||
pthread_attr_init(&thAttr);
|
||||
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
|
||||
|
||||
if (pthread_create(&pWorker->thread, &thAttr, httpProcessResultQueue, pWorker) != 0) {
|
||||
httpError("failed to create thread to process http result queue, reason:%s", strerror(errno));
|
||||
}
|
||||
|
||||
pthread_attr_destroy(&thAttr);
|
||||
httpDebug("http result worker:%d is launched, total:%d", pWorker->workerId, tsHttpPool.num);
|
||||
}
|
||||
|
||||
httpInfo("http result queue is opened");
|
||||
return true;
|
||||
}
|
||||
|
||||
static void httpFreeResultQueue() {
|
||||
taosCloseQueue(tsHttpQueue);
|
||||
tsHttpQueue = NULL;
|
||||
}
|
||||
|
||||
bool httpInitResultQueue() {
|
||||
tsHttpQset = taosOpenQset();
|
||||
|
||||
tsHttpPool.num = tsHttpMaxThreads;
|
||||
tsHttpPool.httpWorker = (SHttpWorker *)calloc(sizeof(SHttpWorker), tsHttpPool.num);
|
||||
|
||||
if (tsHttpPool.httpWorker == NULL) return -1;
|
||||
for (int32_t i = 0; i < tsHttpPool.num; ++i) {
|
||||
SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
|
||||
pWorker->workerId = i;
|
||||
}
|
||||
|
||||
return httpAllocateResultQueue();
|
||||
}
|
||||
|
||||
void httpCleanupResultQueue() {
|
||||
httpFreeResultQueue();
|
||||
|
||||
for (int32_t i = 0; i < tsHttpPool.num; ++i) {
|
||||
SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
|
||||
if (pWorker->thread) {
|
||||
taosQsetThreadResume(tsHttpQset);
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < tsHttpPool.num; ++i) {
|
||||
SHttpWorker *pWorker = tsHttpPool.httpWorker + i;
|
||||
if (pWorker->thread) {
|
||||
pthread_join(pWorker->thread, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
taosCloseQset(tsHttpQset);
|
||||
free(tsHttpPool.httpWorker);
|
||||
|
||||
httpInfo("http result queue is closed");
|
||||
}
|
|
@ -24,12 +24,15 @@
|
|||
#include "httpResp.h"
|
||||
#include "httpAuth.h"
|
||||
#include "httpSession.h"
|
||||
#include "httpQueue.h"
|
||||
|
||||
void *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
|
||||
void *param, void **taos);
|
||||
void httpProcessMultiSql(HttpContext *pContext);
|
||||
|
||||
void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
|
||||
void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
|
||||
|
||||
void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
|
||||
HttpContext *pContext = (HttpContext *)param;
|
||||
if (pContext == NULL) return;
|
||||
|
||||
|
@ -75,7 +78,11 @@ void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numO
|
|||
}
|
||||
}
|
||||
|
||||
void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
|
||||
void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
|
||||
httpDispatchToResultQueue(param, result, numOfRows, httpProcessMultiSqlRetrieveCallBackImp);
|
||||
}
|
||||
|
||||
void httpProcessMultiSqlCallBackImp(void *param, TAOS_RES *result, int code) {
|
||||
HttpContext *pContext = (HttpContext *)param;
|
||||
if (pContext == NULL) return;
|
||||
|
||||
|
@ -154,6 +161,10 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
}
|
||||
}
|
||||
|
||||
void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
|
||||
httpDispatchToResultQueue(param, result, unUsedCode, httpProcessMultiSqlCallBackImp);
|
||||
}
|
||||
|
||||
void httpProcessMultiSql(HttpContext *pContext) {
|
||||
HttpSqlCmds * multiCmds = pContext->multiCmds;
|
||||
HttpEncodeMethod *encode = pContext->encodeMethod;
|
||||
|
@ -196,7 +207,9 @@ void httpProcessMultiSqlCmd(HttpContext *pContext) {
|
|||
httpProcessMultiSql(pContext);
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
|
||||
void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows);
|
||||
|
||||
void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int numOfRows) {
|
||||
HttpContext *pContext = (HttpContext *)param;
|
||||
if (pContext == NULL) return;
|
||||
|
||||
|
@ -243,7 +256,11 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
|
|||
}
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
|
||||
void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int numOfRows) {
|
||||
httpDispatchToResultQueue(param, result, numOfRows, httpProcessSingleSqlRetrieveCallBackImp);
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlCallBackImp(void *param, TAOS_RES *result, int unUsedCode) {
|
||||
HttpContext *pContext = (HttpContext *)param;
|
||||
if (pContext == NULL) return;
|
||||
|
||||
|
@ -306,6 +323,10 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode)
|
|||
}
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int unUsedCode) {
|
||||
httpDispatchToResultQueue(param, result, unUsedCode, httpProcessSingleSqlCallBackImp);
|
||||
}
|
||||
|
||||
void httpProcessSingleSqlCmd(HttpContext *pContext) {
|
||||
HttpSqlCmd * cmd = &pContext->singleCmd;
|
||||
char * sql = cmd->nativSql;
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include "httpServer.h"
|
||||
#include "httpResp.h"
|
||||
#include "httpHandle.h"
|
||||
#include "httpQueue.h"
|
||||
#include "gcHandle.h"
|
||||
#include "restHandle.h"
|
||||
#include "tgHandle.h"
|
||||
|
@ -67,6 +68,11 @@ int httpStartSystem() {
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (!httpInitResultQueue()) {
|
||||
httpError("http init result queue failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!httpInitContexts()) {
|
||||
httpError("http init contexts failed");
|
||||
return -1;
|
||||
|
@ -98,6 +104,8 @@ void httpCleanUpSystem() {
|
|||
httpCleanUpConnect();
|
||||
httpCleanupContexts();
|
||||
httpCleanUpSessions();
|
||||
httpCleanupResultQueue();
|
||||
|
||||
pthread_mutex_destroy(&tsHttpServer.serverMutex);
|
||||
taosTFree(tsHttpServer.pThreads);
|
||||
tsHttpServer.pThreads = NULL;
|
||||
|
|
|
@ -132,11 +132,12 @@ typedef struct SQueryCostInfo {
|
|||
typedef struct SQuery {
|
||||
int16_t numOfCols;
|
||||
int16_t numOfTags;
|
||||
char intervalTimeUnit;
|
||||
char slidingTimeUnit; // interval data type, used for daytime revise
|
||||
SOrderVal order;
|
||||
STimeWindow window;
|
||||
int64_t intervalTime;
|
||||
int64_t slidingTime; // sliding time for sliding window query
|
||||
char slidingTimeUnit; // interval data type, used for daytime revise
|
||||
int16_t precision;
|
||||
int16_t numOfOutput;
|
||||
int16_t fillType;
|
||||
|
|
|
@ -17,6 +17,8 @@
|
|||
#define TDENGINE_QPERCENTILE_H
|
||||
|
||||
#include "qExtbuffer.h"
|
||||
#include "qResultbuf.h"
|
||||
#include "qTsbuf.h"
|
||||
|
||||
typedef struct MinMaxEntry {
|
||||
union {
|
||||
|
@ -31,47 +33,43 @@ typedef struct MinMaxEntry {
|
|||
};
|
||||
} MinMaxEntry;
|
||||
|
||||
typedef struct tMemBucketSegment {
|
||||
int32_t numOfSlots;
|
||||
MinMaxEntry * pBoundingEntries;
|
||||
tExtMemBuffer **pBuffer;
|
||||
} tMemBucketSegment;
|
||||
typedef struct {
|
||||
int32_t size;
|
||||
int32_t pageId;
|
||||
tFilePage *data;
|
||||
} SSlotInfo;
|
||||
|
||||
typedef struct tMemBucketSlot {
|
||||
SSlotInfo info;
|
||||
MinMaxEntry range;
|
||||
} tMemBucketSlot;
|
||||
|
||||
struct tMemBucket;
|
||||
typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value);
|
||||
|
||||
typedef struct tMemBucket {
|
||||
int16_t numOfSegs;
|
||||
int16_t nTotalSlots;
|
||||
int16_t nSlotsOfSeg;
|
||||
int16_t dataType;
|
||||
int16_t numOfSlots;
|
||||
int16_t type;
|
||||
int16_t bytes;
|
||||
int32_t total;
|
||||
int32_t elemPerPage; // number of elements for each object
|
||||
int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
|
||||
int32_t bufPageSize; // disk page size
|
||||
MinMaxEntry range; // value range
|
||||
int32_t times; // count that has been checked for deciding the correct data value buckets.
|
||||
__compar_fn_t comparFn;
|
||||
|
||||
int16_t nElemSize;
|
||||
int32_t numOfElems;
|
||||
|
||||
int32_t nTotalBufferSize;
|
||||
int32_t maxElemsCapacity;
|
||||
|
||||
int32_t pageSize;
|
||||
int16_t numOfTotalPages;
|
||||
int16_t numOfAvailPages; /* remain available buffer pages */
|
||||
|
||||
tMemBucketSegment *pSegs;
|
||||
tOrderDescriptor * pOrderDesc;
|
||||
|
||||
MinMaxEntry nRange;
|
||||
|
||||
void (*HashFunc)(struct tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
|
||||
tMemBucketSlot *pSlots;
|
||||
SDiskbasedResultBuf *pBuffer;
|
||||
__perc_hash_func_t hashFunc;
|
||||
} tMemBucket;
|
||||
|
||||
tMemBucket *tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nElemSize, int16_t dataType,
|
||||
tOrderDescriptor *pDesc);
|
||||
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType);
|
||||
|
||||
void tMemBucketDestroy(tMemBucket *pBucket);
|
||||
|
||||
void tMemBucketPut(tMemBucket *pBucket, void *data, int32_t numOfRows);
|
||||
void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
|
||||
|
||||
double getPercentile(tMemBucket *pMemBucket, double percent);
|
||||
|
||||
void tBucketIntHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
|
||||
|
||||
void tBucketDoubleHash(tMemBucket *pBucket, void *value, int16_t *segIdx, int16_t *slotIdx);
|
||||
|
||||
#endif // TDENGINE_QPERCENTILE_H
|
||||
|
|
|
@ -168,6 +168,7 @@ typedef struct SQLFunctionCtx {
|
|||
int16_t outputType;
|
||||
int16_t outputBytes; // size of results, determined by function and input column data type
|
||||
bool hasNull; // null value exist in current block
|
||||
bool requireNull; // require null in some function
|
||||
int16_t functionId; // function id
|
||||
void * aInputElemBuf;
|
||||
char * aOutputBuf; // final result output buffer, point to sdata->data
|
||||
|
|
|
@ -35,9 +35,7 @@
|
|||
* forced to load primary column explicitly.
|
||||
*/
|
||||
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
|
||||
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
|
||||
#define TSDB_COL_IS_NORMAL_COL(f) ((f) == TSDB_COL_NORMAL)
|
||||
#define TSDB_COL_IS_UD_COL(f) ((f) == TSDB_COL_UDC)
|
||||
|
||||
|
||||
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
|
||||
|
||||
|
@ -137,13 +135,44 @@ static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv);
|
|||
|
||||
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->intervalTime > 0)
|
||||
|
||||
// previous time window may not be of the same size of pQuery->intervalTime
|
||||
#define GET_NEXT_TIMEWINDOW(_q, tw) \
|
||||
do { \
|
||||
int32_t factor = GET_FORWARD_DIRECTION_FACTOR((_q)->order.order); \
|
||||
(tw)->skey += ((_q)->slidingTime * factor); \
|
||||
(tw)->ekey = (tw)->skey + ((_q)->intervalTime - 1); \
|
||||
} while (0)
|
||||
static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) {
|
||||
int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
|
||||
if (pQuery->intervalTimeUnit != 'n' && pQuery->intervalTimeUnit != 'y') {
|
||||
tw->skey += pQuery->slidingTime * factor;
|
||||
tw->ekey = tw->skey + pQuery->intervalTime - 1;
|
||||
return;
|
||||
}
|
||||
|
||||
int64_t key = tw->skey / 1000, interval = pQuery->intervalTime;
|
||||
if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
key /= 1000;
|
||||
}
|
||||
if (pQuery->intervalTimeUnit == 'y') {
|
||||
interval *= 12;
|
||||
}
|
||||
|
||||
struct tm tm;
|
||||
time_t t = (time_t)key;
|
||||
localtime_r(&t, &tm);
|
||||
|
||||
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
|
||||
tm.tm_year = mon / 12;
|
||||
tm.tm_mon = mon % 12;
|
||||
tw->skey = mktime(&tm) * 1000L;
|
||||
|
||||
mon = (int)(mon + interval);
|
||||
tm.tm_year = mon / 12;
|
||||
tm.tm_mon = mon % 12;
|
||||
tw->ekey = mktime(&tm) * 1000L;
|
||||
|
||||
if (pQuery->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
tw->skey *= 1000L;
|
||||
tw->ekey *= 1000L;
|
||||
}
|
||||
tw->ekey -= 1;
|
||||
}
|
||||
|
||||
#define GET_NEXT_TIMEWINDOW(_q, tw) getNextTimeWindow((_q), (tw))
|
||||
|
||||
#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
|
||||
#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
|
||||
|
@ -254,7 +283,7 @@ bool isGroupbyNormalCol(SSqlGroupbyExpr *pGroupbyExpr) {
|
|||
|
||||
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
|
||||
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
|
||||
if (pColIndex->flag == TSDB_COL_NORMAL) {
|
||||
if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
|
||||
//make sure the normal column locates at the second position if tbname exists in group by clause
|
||||
if (pGroupbyExpr->numOfGroupCols > 1) {
|
||||
assert(pColIndex->colIndex > 0);
|
||||
|
@ -275,7 +304,7 @@ int16_t getGroupbyColumnType(SQuery *pQuery, SSqlGroupbyExpr *pGroupbyExpr) {
|
|||
|
||||
for (int32_t i = 0; i < pGroupbyExpr->numOfGroupCols; ++i) {
|
||||
SColIndex *pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, i);
|
||||
if (pColIndex->flag == TSDB_COL_NORMAL) {
|
||||
if (TSDB_COL_IS_NORMAL_COL(pColIndex->flag)) {
|
||||
colId = pColIndex->colId;
|
||||
break;
|
||||
}
|
||||
|
@ -469,7 +498,11 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
|
|||
|
||||
if (pWindowResInfo->curIndex == -1) { // the first window, from the previous stored value
|
||||
w.skey = pWindowResInfo->prevSKey;
|
||||
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
|
||||
w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
|
||||
} else {
|
||||
w.ekey = w.skey + pQuery->intervalTime - 1;
|
||||
}
|
||||
} else {
|
||||
int32_t slot = curTimeWindowIndex(pWindowResInfo);
|
||||
SWindowResult* pWindowRes = getWindowResult(pWindowResInfo, slot);
|
||||
|
@ -477,6 +510,10 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
|
|||
}
|
||||
|
||||
if (w.skey > ts || w.ekey < ts) {
|
||||
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
|
||||
w.skey = taosGetIntervalStartTimestamp(ts, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
|
||||
w.ekey = taosAddNatualInterval(w.skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
|
||||
} else {
|
||||
int64_t st = w.skey;
|
||||
|
||||
if (st > ts) {
|
||||
|
@ -491,6 +528,7 @@ static STimeWindow getActiveTimeWindow(SWindowResInfo *pWindowResInfo, int64_t t
|
|||
w.skey = st;
|
||||
w.ekey = w.skey + pQuery->intervalTime - 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* query border check, skey should not be bounded by the query time range, since the value skey will
|
||||
|
@ -814,15 +852,23 @@ static int32_t getNextQualifiedWindow(SQueryRuntimeEnv *pRuntimeEnv, STimeWindow
|
|||
*/
|
||||
if (QUERY_IS_ASC_QUERY(pQuery) && primaryKeys[startPos] > pNext->ekey) {
|
||||
TSKEY next = primaryKeys[startPos];
|
||||
|
||||
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
|
||||
pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
|
||||
pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
|
||||
} else {
|
||||
pNext->ekey += ((next - pNext->ekey + pQuery->slidingTime - 1)/pQuery->slidingTime) * pQuery->slidingTime;
|
||||
pNext->skey = pNext->ekey - pQuery->intervalTime + 1;
|
||||
}
|
||||
} else if ((!QUERY_IS_ASC_QUERY(pQuery)) && primaryKeys[startPos] < pNext->skey) {
|
||||
TSKEY next = primaryKeys[startPos];
|
||||
|
||||
if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
|
||||
pNext->skey = taosGetIntervalStartTimestamp(next, pQuery->slidingTime, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision);
|
||||
pNext->ekey = taosAddNatualInterval(pNext->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
|
||||
} else {
|
||||
pNext->skey -= ((pNext->skey - next + pQuery->slidingTime - 1) / pQuery->slidingTime) * pQuery->slidingTime;
|
||||
pNext->ekey = pNext->skey + pQuery->intervalTime - 1;
|
||||
}
|
||||
}
|
||||
|
||||
return startPos;
|
||||
}
|
||||
|
@ -1085,7 +1131,7 @@ static char *getGroupbyColumnData(SQuery *pQuery, int16_t *type, int16_t *bytes,
|
|||
|
||||
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
|
||||
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
|
||||
if (pColIndex->flag == TSDB_COL_TAG) {
|
||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1555,6 +1601,13 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
|
|||
SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i];
|
||||
SColIndex* pIndex = &pSqlFuncMsg->colInfo;
|
||||
|
||||
if (TSDB_COL_REQ_NULL(pIndex->flag)) {
|
||||
pCtx->requireNull = true;
|
||||
pIndex->flag &= ~(TSDB_COL_NULL);
|
||||
} else {
|
||||
pCtx->requireNull = false;
|
||||
}
|
||||
|
||||
int32_t index = pSqlFuncMsg->colInfo.colIndex;
|
||||
if (TSDB_COL_IS_TAG(pIndex->flag)) {
|
||||
if (pIndex->colId == TSDB_TBNAME_COLUMN_INDEX) { // todo refactor
|
||||
|
@ -1574,6 +1627,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
|
|||
pCtx->inputType = pQuery->colList[index].type;
|
||||
}
|
||||
|
||||
|
||||
assert(isValidDataType(pCtx->inputType));
|
||||
pCtx->ptsOutputBuf = NULL;
|
||||
|
||||
|
@ -1783,7 +1837,7 @@ static bool onlyQueryTags(SQuery* pQuery) {
|
|||
if (functionId != TSDB_FUNC_TAGPRJ &&
|
||||
functionId != TSDB_FUNC_TID_TAG &&
|
||||
(!(functionId == TSDB_FUNC_COUNT && pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX)) &&
|
||||
(!(functionId == TSDB_FUNC_PRJ && pExprInfo->base.colInfo.flag == TSDB_COL_UDC))) {
|
||||
(!(functionId == TSDB_FUNC_PRJ && TSDB_COL_IS_UD_COL(pExprInfo->base.colInfo.flag)))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -1804,7 +1858,8 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6
|
|||
if (keyFirst > (INT64_MAX - pQuery->intervalTime)) {
|
||||
assert(keyLast - keyFirst < pQuery->intervalTime);
|
||||
win->ekey = INT64_MAX;
|
||||
return;
|
||||
} else if (pQuery->intervalTimeUnit == 'n' || pQuery->intervalTimeUnit == 'y') {
|
||||
win->ekey = taosAddNatualInterval(win->skey, pQuery->intervalTime, pQuery->intervalTimeUnit, pQuery->precision) - 1;
|
||||
} else {
|
||||
win->ekey = win->skey + pQuery->intervalTime - 1;
|
||||
}
|
||||
|
@ -1909,6 +1964,15 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, bool stableQuery) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
|
||||
pQuery->order.order = TSDB_ORDER_ASC;
|
||||
if (pQuery->window.skey > pQuery->window.ekey) {
|
||||
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (isPointInterpoQuery(pQuery) && pQuery->intervalTime == 0) {
|
||||
if (!QUERY_IS_ASC_QUERY(pQuery)) {
|
||||
qDebug(msg, GET_QINFO_ADDR(pQuery), "interp", pQuery->order.order, TSDB_ORDER_ASC, pQuery->window.skey,
|
||||
|
@ -2070,35 +2134,36 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat
|
|||
return false;
|
||||
}
|
||||
|
||||
#define PT_IN_WINDOW(_p, _w) ((_p) > (_w).skey && (_p) < (_w).ekey)
|
||||
|
||||
static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
|
||||
STimeWindow w = {0};
|
||||
|
||||
TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey);
|
||||
TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey);
|
||||
|
||||
|
||||
if (QUERY_IS_ASC_QUERY(pQuery)) {
|
||||
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, sk, ek, &w);
|
||||
assert(w.ekey >= pBlockInfo->window.skey);
|
||||
|
||||
if (PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
|
||||
if (w.ekey < pBlockInfo->window.ekey) {
|
||||
return true;
|
||||
}
|
||||
|
||||
while(1) {
|
||||
GET_NEXT_TIMEWINDOW(pQuery, &w);
|
||||
if (w.skey > pBlockInfo->window.skey) {
|
||||
if (w.skey > pBlockInfo->window.ekey) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
|
||||
assert(w.ekey > pBlockInfo->window.ekey);
|
||||
if (w.skey <= pBlockInfo->window.ekey && w.skey > pBlockInfo->window.skey) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.ekey, sk, ek, &w);
|
||||
if (PT_IN_WINDOW(w.skey, pBlockInfo->window)) {
|
||||
assert(w.skey <= pBlockInfo->window.ekey);
|
||||
|
||||
if (w.skey > pBlockInfo->window.skey) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -2108,7 +2173,8 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
|
||||
assert(w.skey < pBlockInfo->window.skey);
|
||||
if (w.ekey < pBlockInfo->window.ekey && w.ekey >= pBlockInfo->window.skey) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -4385,7 +4451,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo
|
|||
|
||||
// NOTE: pTableCheckInfo need to update the query time range and the lastKey info
|
||||
// TODO fixme
|
||||
changeExecuteScanOrder(pQInfo, false);
|
||||
changeExecuteScanOrder(pQInfo, isSTableQuery);
|
||||
|
||||
code = setupQueryHandle(tsdb, pQInfo, isSTableQuery);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -5353,7 +5419,7 @@ static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pE
|
|||
j += 1;
|
||||
}
|
||||
|
||||
} else if (pExprMsg->colInfo.flag == TSDB_COL_UDC) { // user specified column data
|
||||
} else if (TSDB_COL_IS_UD_COL(pExprMsg->colInfo.flag)) { // user specified column data
|
||||
return TSDB_UD_COLUMN_INDEX;
|
||||
} else {
|
||||
while (j < pQueryMsg->numOfCols) {
|
||||
|
@ -5561,7 +5627,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
|
|||
|
||||
int16_t functionId = pExprMsg->functionId;
|
||||
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) {
|
||||
if (pExprMsg->colInfo.flag != TSDB_COL_TAG) { // ignore the column index check for arithmetic expression.
|
||||
if (!TSDB_COL_IS_TAG(pExprMsg->colInfo.flag)) { // ignore the column index check for arithmetic expression.
|
||||
code = TSDB_CODE_QRY_INVALID_MSG;
|
||||
goto _cleanup;
|
||||
}
|
||||
|
@ -6016,6 +6082,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
|
|||
pQuery->pGroupbyExpr = pGroupbyExpr;
|
||||
pQuery->intervalTime = pQueryMsg->intervalTime;
|
||||
pQuery->slidingTime = pQueryMsg->slidingTime;
|
||||
pQuery->intervalTimeUnit = pQueryMsg->intervalTimeUnit;
|
||||
pQuery->slidingTimeUnit = pQueryMsg->slidingTimeUnit;
|
||||
pQuery->fillType = pQueryMsg->fillType;
|
||||
pQuery->numOfTags = pQueryMsg->numOfTags;
|
||||
|
@ -6093,6 +6160,9 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
|
|||
goto _cleanup;
|
||||
}
|
||||
|
||||
// NOTE: pTableCheckInfo need to update the query time range and the lastKey info
|
||||
// changeExecuteScanOrder(pQInfo, stableQuery);
|
||||
|
||||
int32_t index = 0;
|
||||
|
||||
for(int32_t i = 0; i < numOfGroups; ++i) {
|
||||
|
@ -6843,7 +6913,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
|
|||
int16_t type = 0, bytes = 0;
|
||||
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
|
||||
// not assign value in case of user defined constant output column
|
||||
if (pExprInfo[j].base.colInfo.flag == TSDB_COL_UDC) {
|
||||
if (TSDB_COL_IS_UD_COL(pExprInfo[j].base.colInfo.flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
|
|||
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
|
||||
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
|
||||
|
||||
if (pColInfo->flag == TSDB_COL_TAG) {
|
||||
if (TSDB_COL_IS_TAG(pColInfo->flag)) {
|
||||
bool exists = false;
|
||||
for(int32_t j = 0; j < k; ++j) {
|
||||
if (pFillInfo->pTags[j].col.colId == pColInfo->col.colId) {
|
||||
|
@ -155,7 +155,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, tFilePage* pInpu
|
|||
char* data = pInput->data + pCol->col.offset * pInput->num;
|
||||
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
|
||||
|
||||
if (pCol->flag == TSDB_COL_TAG) { // copy the tag value to tag value buffer
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
|
||||
for (int32_t j = 0; j < pFillInfo->numOfTags; ++j) {
|
||||
SFillTagColInfo* pTag = &pFillInfo->pTags[j];
|
||||
if (pTag->col.colId == pCol->col.colId) {
|
||||
|
@ -179,14 +179,22 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
|
|||
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
|
||||
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
|
||||
|
||||
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
|
||||
numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
|
||||
} else {
|
||||
numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
|
||||
}
|
||||
assert(numOfRes >= numOfRows);
|
||||
} else { // reach the end of data
|
||||
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
|
||||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
|
||||
return 0;
|
||||
} else { // the numOfRes rows are all filled with specified policy
|
||||
}
|
||||
// the numOfRes rows are all filled with specified policy
|
||||
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
|
||||
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
|
||||
} else {
|
||||
numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -251,7 +259,7 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
|
|||
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t num) {
|
||||
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
|
||||
if (pCol->flag == TSDB_COL_NORMAL) {
|
||||
if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -366,7 +374,12 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
|
|||
setTagsValue(pFillInfo, data, *num);
|
||||
}
|
||||
|
||||
// TODO natural sliding time
|
||||
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
|
||||
pFillInfo->start += (pFillInfo->slidingTime * step);
|
||||
} else {
|
||||
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
|
||||
}
|
||||
pFillInfo->numOfCurrent++;
|
||||
|
||||
(*num) += 1;
|
||||
|
@ -446,7 +459,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
|
|||
// assign rows to dst buffer
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (pCol->flag == TSDB_COL_TAG) {
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -473,7 +486,12 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
|
|||
// set the tag value for final result
|
||||
setTagsValue(pFillInfo, data, num);
|
||||
|
||||
// TODO natural sliding time
|
||||
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
|
||||
pFillInfo->start += (pFillInfo->slidingTime * step);
|
||||
} else {
|
||||
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
|
||||
}
|
||||
pFillInfo->rowIdx += 1;
|
||||
|
||||
pFillInfo->numOfCurrent +=1;
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.