diff --git a/CMakeLists.txt b/CMakeLists.txt
index 40022a5390..b572d7bd16 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -280,6 +280,11 @@ IF (NOT DEFINED TD_CLUSTER)
       INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
       INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
     ENDIF ()
+  ELSEIF (TD_DARWIN_64)
+    SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
+    INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
+    INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
+    INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
   ENDIF ()
 ENDIF ()
diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md
index 46a2b04daa..a0433d1f09 100644
--- a/documentation/webdocs/markdowndocs/Connector.md
+++ b/documentation/webdocs/markdowndocs/Connector.md
@@ -175,26 +175,34 @@ TDengine provides APIs for continuous query driven by time, which run queries pe
 ### C/C++ subscription API
-For the time being, TDengine supports subscription on one table. It is implemented through periodic pulling from a TDengine server.
+For the time being, TDengine supports subscription on one or more tables. It is implemented through periodic pulling from a TDengine server.
-- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)`
-  The API is used to start a subscription session by given a handle. The parameters required are _host_ (IP address of a TDenginer server), _user_ (username), _pass_ (password), _db_ (database to use), _table_ (table name to subscribe), _time_ (start time to subscribe, 0 for now), _mseconds_ (pulling period). If failed to open a subscription session, a _NULL_ pointer is returned.
+* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
+  This API is used to start a subscription session. It returns the subscription object on success and `NULL` in case of failure. The parameters are:
+  * **taos**: The database connection, which must already be established.
+  * **restart**: `Zero` to continue a subscription if it already exists; any other value starts reading from the beginning.
+  * **topic**: The unique identifier of a subscription.
+  * **sql**: A SQL statement for the data query. It can only be a `select` statement, can only query raw data, and can only query data in ascending order of the timestamp field.
+  * **fp**: A callback function to receive the query result, used only in asynchronous mode; it should be `NULL` in synchronous mode. Please refer below for its prototype.
+  * **param**: A user-provided additional parameter for the callback function.
+  * **interval**: The pulling interval in milliseconds. In asynchronous mode, the API calls the callback function `fp` at this interval; system performance will be impacted if the interval is too short. In synchronous mode, if the duration between two calls to `taos_consume` is less than this interval, the second call blocks until the duration exceeds this interval.
-- `TAOS_ROW taos_consume(TAOS_SUB *tsub)`
-  The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter _tsub_ is the handle returned by _taos_subscribe_. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives.
-  If _NULL_ pointer is returned, it means an error occurs.
+* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
+  The prototype of the callback function. The parameters are:
+  * tsub: The subscription object.
+  * res: The query result.
+  * param: The user-provided additional parameter passed to `taos_subscribe`.
+  * code: The error code in case of failure.
-- `void taos_unsubscribe(TAOS_SUB *tsub)`
-  Stop a subscription session by the handle returned by _taos_subscribe_.
+* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
+  This API is used to get new data from a TDengine server. It should be put in a loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronous mode. If the duration between two calls to `taos_consume` is less than the pulling interval, the second call blocks until the duration exceeds the interval. The API returns the new rows if new data has arrived, or an empty row set otherwise; if there is an error, it returns `NULL`.
+
+* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
-- `int taos_num_subfields(TAOS_SUB *tsub)`
-  The API used to get the number of fields in a row.
-
-
-- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)`
-  The API used to get the description of each column.
+  Stop a subscription session using the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in a later call to `taos_subscribe`; otherwise the information is removed.
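Taken together, a minimal synchronous-mode sketch of these calls could look like the example below. The connection parameters, the `test` database, the `meters` table and the `demo` topic name are illustrative assumptions, and error handling is kept to a minimum.

```c
#include <stdio.h>
#include "taos.h"

int main() {
  taos_init();
  // Assumed connection parameters and database; adjust to your environment.
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) {
    printf("failed to connect to the TDengine server\n");
    return 1;
  }

  // Synchronous mode: no callback (fp and param are NULL), pull every 1000 ms.
  TAOS_SUB *tsub = taos_subscribe(taos, 1, "demo", "select * from meters;", NULL, NULL, 1000);
  if (tsub == NULL) {
    printf("failed to create the subscription\n");
    taos_close(taos);
    return 1;
  }

  for (int i = 0; i < 10; i++) {           // poll a fixed number of times in this sketch
    TAOS_RES *res = taos_consume(tsub);    // blocks until the pulling interval has elapsed
    if (res == NULL) {                     // NULL indicates an error
      printf("taos_consume failed\n");
      break;
    }
    int rows = 0;
    while (taos_fetch_row(res) != NULL) {  // an empty result set simply means no new data
      rows++;
    }
    printf("received %d new row(s)\n", rows);
  }

  taos_unsubscribe(tsub, 0);               // 0: discard the subscription progress
  taos_close(taos);
  return 0;
}
```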
 ## Java Connector
@@ -208,7 +216,7 @@ Since the native language of TDengine is C, the necessary TDengine library shoul
 * taos.dll (Windows)
   After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
-> Note: Please make sure that TDengine Windows client has been installed if developing on Windows.
+> Note: Please make sure that the [TDengine Windows client][14] has been installed if developing on Windows. Although the TDengine client is installed together with the TDengine server by default, it can also be installed [alone][15].
 Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver:
 * TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method.
@@ -583,13 +591,35 @@ data = c1.fetchall()
 numOfRows = c1.rowcount
 numOfCols = len(c1.description)
 for irow in range(numOfRows):
-    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])
+    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
 # use the cursor as an iterator to retrieve all returned results
 c1.execute('select * from tb')
 for data in c1:
     print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2])
 ```
+
+* create a subscription
+```python
+# Create a subscription with topic 'test' and consumption interval 1000ms.
+# Passing True as the first argument restarts the subscription; if a
+# subscription with topic 'test' has already been created, passing False
+# continues the existing subscription instead.
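+# The second argument is the subscription topic, the third is the SQL query
+# to run, and the fourth is the pulling interval in milliseconds.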
+sub = conn.subscribe(True, "test", "select * from meters;", 1000) +``` + +* consume a subscription +```python +data = sub.consume() +for d in data: + print(d) +``` + +* close the subscription +```python +sub.close() +``` + * close the connection ```python c1.close() @@ -882,4 +912,5 @@ An example of using the NodeJS connector to achieve the same things but without [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo [13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE - +[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3 +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B diff --git a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md index e9f47bd03e..1e9383c40c 100644 --- a/documentation/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation/webdocs/markdowndocs/TAOS SQL-ch.md @@ -423,7 +423,7 @@ count(tbname) | ###聚合函数 -TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数如下表: +TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数如下: - **COUNT** ```mysql @@ -446,13 +446,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数 适用于:表、超级表。 -- **WAVG** +- **TWA** ```mysql - SELECT WAVG(field_name) FROM tb_name WHERE clause + SELECT TWA(field_name) FROM tb_name WHERE clause ``` - 功能说明:统计表/超级表中某列在一段时间内的时间加权平均。 + 功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。 返回结果数据类型:双精度浮点数Double。 - 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 说明:时间加权平均(time weighted average, TWA)查询需要指定查询时间段的 _开始时间_ 和 _结束时间_ 。 适用于:表、超级表。 @@ -556,7 +557,15 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和提取函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 说明:*k*值取值范围0≤*k*≤100,为0的时候等同于MIN,为100的时候等同于MAX。 - +- **APERCENTILE** + ```mysql + SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause] + ``` + 功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。 + 返回结果数据类型: 双精度浮点数Double。 + 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 说明:*k*值取值范围0≤*k*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数 + - **LAST_ROW** ```mysql SELECT LAST_ROW(field_name) FROM { tb_name | stb_name } diff --git a/documentation/webdocs/markdowndocs/administrator-ch.md b/documentation/webdocs/markdowndocs/administrator-ch.md index 2a250a916c..35beb610f2 100644 --- a/documentation/webdocs/markdowndocs/administrator-ch.md +++ b/documentation/webdocs/markdowndocs/administrator-ch.md @@ -337,6 +337,8 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。 insert into tb1 file a.csv b.csv tb2 c.csv … import into tb1 file a.csv b.csv tb2 c.csv … ``` +> 注意:导入的CSV文件不能够带表头, 且表的列与CSV文件的列需要严格对应。 +> 同样还可以使用[样例数据导入工具][1]对数据进行横向和纵向扩展导入。 ## 数据导出 @@ -407,3 +409,6 @@ KILL STREAM TDengine启动后,会自动创建一个监测数据库`LOG`,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在`LOG`库里。系统管理员可以通过客户端程序查看记录库中的运行负载信息,(在企业版中)还可以通过浏览器查看数据的图标可视化结果。 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项`monitor`将其关闭或打开。 + + +[1]: https://github.com/taosdata/TDengine/tree/develop/importSampleData \ No newline at end of file diff --git a/documentation/webdocs/markdowndocs/advanced features-ch.md b/documentation/webdocs/markdowndocs/advanced features-ch.md index 14a2801209..4d01eaf364 100644 --- a/documentation/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation/webdocs/markdowndocs/advanced features-ch.md @@ -63,28 +63,11 @@ 
CREATE TABLE QUERY_RES ## 数据订阅(Publisher/Subscriber) 基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine中里每一张表均可视为一个标准的消息队列。 -TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可订阅数据库中的某一张表(或超级表)。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 +TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 -#### API说明 - -使用订阅的功能,主要API如下: - - -示例代码:请看安装包中的的示范程序 +订阅相关API请见 [连接器](https://www.taosdata.com/cn/documentation/connector/)。 ## 缓存 (Cache) TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 diff --git a/documentation/webdocs/markdowndocs/connector-ch.md b/documentation/webdocs/markdowndocs/connector-ch.md index 23bc6a9f6c..b5d8fb5afb 100644 --- a/documentation/webdocs/markdowndocs/connector-ch.md +++ b/documentation/webdocs/markdowndocs/connector-ch.md @@ -164,27 +164,36 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 ### C/C++ 数据订阅接口 -订阅API目前支持订阅一张表,并通过定期轮询的方式不断获取写入表中的最新数据。 +订阅API目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。 -- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)` +* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)` - 该API用来启动订阅,需要提供的参数包含:TDengine管理主节点的IP地址、用户名、密码、数据库、数据库表的名字;time是开始订阅消息的时间,是从1970年1月1日起计算的毫秒数,为长整型, 如果设为0,表示从当前时间开始订阅;mseconds为查询数据库更新的时间间隔,单位为毫秒,建议设为1000毫秒。返回值为一指向TDengine_SUB结构的指针,如果返回为空,表示失败。 + 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为: + * taos:已经建立好的数据库连接 + * restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + * topic:订阅的主题(即名称),此参数是订阅的唯一标识 + * sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 + * fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL` + * param:调用回调函数时的附加参数,系统API将其原样传递到回调函数,不进行任何处理 + * interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用`taos_consume`的间隔小于此周期,API将会阻塞,直到时间间隔超过此周期。 -- `TAOS_ROW taos_consume(TAOS_SUB *tsub)` +* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)` - 该API用来获取最新消息,应用程序一般会将其置于一个无限循环语句中。其中参数tsub是taos_subscribe的返回值。如果数据库有新的记录,该API将返回,返回参数是一行记录。如果没有新的记录,该API将阻塞。如果返回值为空,说明系统出错,需要检查系统是否还在正常运行。 + 异步模式下,回调函数的原型,其参数为: + * tsub:订阅对象 + * res:查询结果集,注意结果集中可能没有记录 + * param:调用 `taos_subscribe`时客户程序提供的附加参数 + * code:错误码 -- `void taos_unsubscribe(TAOS_SUB *tsub)` - 该API用于取消订阅,参数tsub是taos_subscribe的返回值。应用程序退出时,需要调用该API,否则有资源泄露。 +* `TAOS_RES *taos_consume(TAOS_SUB *tsub)` -- `int taos_num_subfields(TAOS_SUB *tsub)` + 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。 - 该API用来获取返回的一排数据中数据的列数 +* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` -- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)` + 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 - 该API用来获取每列数据的属性(数据类型、名字、字节数),与taos_num_subfields配合使用,可用来解析返回的一排数据。 ## Java Connector @@ -198,38 +207,38 @@ TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API * taos.dll 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 -> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 
才能使用。 +> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: * TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 * 由于不支持删除和修改,所以也不支持事务操作。 * 目前不支持表间的 union 操作。 -* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。 +* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet。 ## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | -| --- | --- | --- | +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| --- | --- | --- | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | ## TDengine DataType 和 Java DataType TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: -| TDengine DataType | Java DataType | -| --- | --- | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | +| TDengine DataType | Java DataType | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | | SMALLINT, TINYINT |java.lang.Short | -| BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | ## 如何获取 TAOS-JDBCDriver @@ -579,13 +588,34 @@ data = c1.fetchall() numOfRows = c1.rowcount numOfCols = len(c1.description) for irow in range(numOfRows): - print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]) + print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2])) # 直接使用cursor 循环拉取查询结果 c1.execute('select * from tb') for data in c1: print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]) ``` + +* 创建订阅 +```python +# 创建一个主题为 'test' 消费周期为1000毫秒的订阅 +# 第一个参数为 True 表示重新开始订阅,如为 False 且之前创建过主题为 'test' 的订阅,则表示继续消费此订阅的数据,而不是重新开始消费所有数据 +sub = conn.subscribe(True, "test", "select * from meters;", 1000) +``` + +* 消费订阅的数据 +```python +data = sub.consume() +for d in data: + print(d) +``` + +* 取消订阅 +```python +sub.close() +``` + + * 关闭连接 ```python c1.close() @@ -807,6 +837,8 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 ## Go Connector +### linux环境 + #### 安装TDengine Go的连接器使用到了 libtaos.so 和taos.h,因此,在使用Go连接器之前,需要在程序运行的机器上安装TDengine以获得相关的驱动文件。 @@ -867,7 +899,14 @@ taosSql驱动包内采用cgo模式,调用了TDengine的C/C++同步接口,与 3. 
创建表、写入和查询数据 -在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码 +在创建好了数据库后,就可以开始创建表和写入查询数据了。这些操作的基本思路都是首先组装SQL语句,然后调用db.Exec执行,并检查错误信息和执行相应的处理。可以参考上面的样例代码。 + +### windows环境 + +在windows上使用Go,请参考  +[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/) + + ## Node.js Connector @@ -1054,6 +1093,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos ├───├── jdbc ├───└── python ├── driver +├───├── libtaos.dll +├───├── libtaos.dll.a ├───├── taos.dll ├───├── taos.exp ├───└── taos.lib @@ -1078,8 +1119,8 @@ https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos + Client可执行文件: C:/TDengine/taos.exe + 配置文件: C:/TDengine/cfg/taos.cfg -+ C驱动程序目录: C:/TDengine/driver -+ C驱动程序头文件: C:/TDengine/include ++ 驱动程序目录: C:/TDengine/driver ++ 驱动程序头文件: C:/TDengine/include + JDBC驱动程序目录: C:/TDengine/connector/jdbc + GO驱动程序目录:C:/TDengine/connector/go + Python驱动程序目录:C:/TDengine/connector/python @@ -1106,6 +1147,14 @@ taos -h TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,需要包含TDengine头文件taos.h,连接时需要链接TDengine库taos.lib,运行时将taos.dll放到可执行文件目录下。 +#### Go接口注意事项 + +TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序使用时,除了需要Go的驱动包(C:\TDengine\connector\go)外,还需要包含TDengine头文件taos.h,连接时需要链接TDengine库libtaos.dll、libtaos.dll.a(C:\TDengine\driver),运行时将libtaos.dll、libtaos.dll.a放到可执行文件目录下。 + +使用参考请见: + +[TDengine GO windows驱动的编译和使用](https://www.taosdata.com/blog/2020/01/06/tdengine-go-windows%E9%A9%B1%E5%8A%A8%E7%9A%84%E7%BC%96%E8%AF%91/) + #### JDBC接口注意事项 在Windows系统上,应用程序可以使用JDBC接口来操纵数据库,使用JDBC接口的注意事项如下: @@ -1121,6 +1170,49 @@ TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序 + 将Windows开发包(taos.dll)放置到system32目录下。 +## Mac客户端及程序接口 + +### 客户端安装 + +在Mac操作系统下,TDengine提供64位的Mac客户端([2月10日起提供下载](https://www.taosdata.com/cn/all-downloads/#tdengine_mac-list)),客户端安装程序为.tar.gz文件,解压并运行其中的install_client.sh后即可完成安装,安装路径为/usr/loca/taos。客户端目录结构如下: + +``` +├── cfg +├───└── taos.cfg +├── connector +├───├── go +├───├── grafana +├───├── jdbc +├───└── python +├── driver +├───├── libtaos.1.6.5.1.dylib +├── examples +├───├── bash +├───├── c +├───├── C# +├───├── go +├───├── JDBC +├───├── lua +├───├── matlab +├───├── nodejs +├───├── python +├───├── R +├───└── rust +├── include +├───└── taos.h +└── bin +├───└── taos +``` + +其中,最常用的文件列出如下: + ++ Client可执行文件: /usr/local/taos/bin/taos 软连接到 /usr/local/bin/taos ++ 配置文件: /usr/local/taos/cfg/taos.cfg 软连接到 /etc/taos/taos.cfg ++ 驱动程序目录: /usr/local/taos/driver/libtaos.1.6.5.1.dylib 软连接到 /usr/local/lib/libtaos.dylib ++ 驱动程序头文件: /usr/local/taos/include/taos.h 软连接到 /usr/local/include/taos.h ++ 日志目录(第一次运行程序时生成):~/TDengineLog + + [1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver [2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver @@ -1135,3 +1227,5 @@ TDengine在Window系统上提供的API与Linux系统是相同的, 应用程序 [11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo [13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE +[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3 +[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 8c55269bb9..92734cb980 100755 --- a/packaging/deb/makedeb.sh +++ 
b/packaging/deb/makedeb.sh @@ -2,7 +2,7 @@ # # Generate deb package for ubuntu set -e -#set -x +# set -x #curr_dir=$(pwd) compile_dir=$1 diff --git a/packaging/release.sh b/packaging/release.sh index bdb18dde4c..728060db29 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -3,7 +3,7 @@ # Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os set -e -# set -x +#set -x # releash.sh -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] @@ -46,8 +46,17 @@ done echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType}" curr_dir=$(pwd) -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/..)" + +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/.. +fi + versioninfo="${top_dir}/src/util/src/version.c" csudo="" @@ -147,7 +156,14 @@ build_time=$(date +"%F %R") echo "char version[64] = \"${version}\";" > ${versioninfo} echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo} echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} -echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +if [ "$verMode" != "cluster" ]; then + echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo} +else + enterprise_dir="${top_dir}/../enterprise" + cd ${enterprise_dir} + echo "char gitinfoOfInternal[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo} + cd ${curr_dir} +fi echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo} echo "" >> ${versioninfo} tmp_version=$(echo $version | tr -s "." "_") @@ -167,15 +183,23 @@ if [ -d ${compile_dir} ]; then ${csudo} rm -rf ${compile_dir} fi -${csudo} mkdir -p ${compile_dir} +if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${compile_dir} +else + mkdir -p ${compile_dir} +fi cd ${compile_dir} # check support cpu type if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then - cmake ../ -DCPUTYPE=${cpuType} + if [ "$verMode" != "cluster" ]; then + cmake ../ -DCPUTYPE=${cpuType} + else + cmake ../../ -DCPUTYPE=${cpuType} + fi else - echo "input cpuType=${cpuType} error!!!" - exit 1 + echo "input cpuType=${cpuType} error!!!" 
+ exit 1 fi make @@ -187,28 +211,36 @@ cd ${curr_dir} #osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) #echo "osinfo: ${osinfo}" -echo "====do deb package for the ubuntu system====" -output_dir="${top_dir}/debs" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} -fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/deb -${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} - -echo "====do rpm package for the centos system====" -output_dir="${top_dir}/rpms" -if [ -d ${output_dir} ]; then - ${csudo} rm -rf ${output_dir} -fi -${csudo} mkdir -p ${output_dir} -cd ${script_dir}/rpm -${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} +if [ "$osType" != "Darwin" ]; then + if [ "$verMode" != "cluster" ]; then + echo "====do deb package for the ubuntu system====" + output_dir="${top_dir}/debs" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/deb + ${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} -echo "====do tar.gz package for all systems====" -cd ${script_dir}/tools -${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} -${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} + echo "====do rpm package for the centos system====" + output_dir="${top_dir}/rpms" + if [ -d ${output_dir} ]; then + ${csudo} rm -rf ${output_dir} + fi + ${csudo} mkdir -p ${output_dir} + cd ${script_dir}/rpm + ${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType} + fi + + echo "====do tar.gz package for all systems====" + cd ${script_dir}/tools + + ${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} + ${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} +else + cd ${script_dir}/tools + ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} +fi # 4. 
Clean up temporary compile directories #${csudo} rm -rf ${compile_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index e76e160843..20b7d5f755 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -2,8 +2,8 @@ # # Generate rpm package for centos -#set -e -#set -x +set -e +# set -x #curr_dir=$(pwd) compile_dir=$1 diff --git a/packaging/rpm/taosd b/packaging/rpm/taosd index 6283c79383..46dd712e31 100644 --- a/packaging/rpm/taosd +++ b/packaging/rpm/taosd @@ -26,7 +26,7 @@ MAX_OPEN_FILES=65535 # Default program options NAME=taosd -PROG=/usr/local/bin/taos/taosd +PROG=/usr/local/taos/bin/taosd USER=root GROUP=root diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh old mode 100755 new mode 100644 index ea3e16d345..beca20e68d --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -6,6 +6,8 @@ set -e #set -x +verMode=lite + # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory @@ -27,7 +29,12 @@ install_main_dir="/usr/local/taos" # old bin dir bin_dir="/usr/local/taos/bin" +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" + service_config_dir="/etc/systemd/system" +nginx_port=6060 +nginx_dir="/usr/local/nginxd" # Color setting RED='\033[0;31m' @@ -41,6 +48,8 @@ if command -v sudo > /dev/null; then csudo="sudo" fi +update_flag=0 + initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then @@ -69,23 +78,24 @@ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) #echo "osinfo: ${osinfo}" os_type=0 if echo $osinfo | grep -qwi "ubuntu" ; then - echo "this is ubuntu system" + echo "This is ubuntu system" os_type=1 elif echo $osinfo | grep -qwi "debian" ; then - echo "this is debian system" + echo "This is debian system" os_type=1 elif echo $osinfo | grep -qwi "Kylin" ; then - echo "this is Kylin system" + echo "This is Kylin system" os_type=1 elif echo $osinfo | grep -qwi "centos" ; then - echo "this is centos system" + echo "This is centos system" os_type=2 elif echo $osinfo | grep -qwi "fedora" ; then - echo "this is fedora system" + echo "This is fedora system" os_type=2 else - echo "this is other linux system" - os_type=0 + echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, " + echo "please feel free to contact taosdata.com for support." 
+ os_type=1 fi function kill_taosd() { @@ -106,6 +116,9 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$verMode" == "cluster" ]; then + ${csudo} mkdir -p ${nginx_dir} + fi } function install_bin() { @@ -124,16 +137,30 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$verMode" == "cluster" ]; then + ${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/* + ${csudo} mkdir -p ${nginx_dir}/logs + ${csudo} chmod 777 ${nginx_dir}/sbin/nginx + fi } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + + if [ "$verMode" == "cluster" ]; then + # Compatible with version 1.5 + ${csudo} mkdir -p ${v15_java_app_dir} + ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar + ${csudo} chmod 777 ${v15_java_app_dir} || : + fi } function install_header() { @@ -154,6 +181,57 @@ function install_config() { ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + + if [ "$verMode" == "cluster" ]; then + [ ! -z $1 ] && return 0 || : # only install client + + if ((${update_flag}==1)); then + return 0 + fi + + IP_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" + IP_PATTERN="\b$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\b" + + echo + echo -e -n "${GREEN}Enter the IP address of an existing TDengine cluster node to join${NC} OR ${GREEN}leave it blank to build one${NC} :" + read masterIp + while true; do + if [ ! -z "$masterIp" ]; then + # check the format of the masterIp + if [[ $masterIp =~ $IP_PATTERN ]]; then + # Write the first IP to configuration file + sudo sed -i -r "s/#*\s*(masterIp\s*).*/\1$masterIp/" ${cfg_dir}/taos.cfg + + # Get the second IP address + + echo + echo -e -n "${GREEN}Enter the IP address of another node in cluster${NC} OR ${GREEN}leave it blank to skip${NC}: " + read secondIp + while true; do + + if [ ! 
-z "$secondIp" ]; then + if [[ $secondIp =~ $IP_PATTERN ]]; then + # Write the second IP to configuration file + sudo sed -i -r "s/#*\s*(secondIp\s*).*/\1$secondIp/" ${cfg_dir}/taos.cfg + break + else + read -p "Please enter the correct IP address: " secondIp + fi + else + break + fi + done + + break + else + read -p "Please enter the correct IP address: " masterIp + fi + else + break + fi + done + + fi } @@ -175,7 +253,9 @@ function install_connector() { } function install_examples() { - ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + if [ -d ${script_dir}/examples ]; then + ${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples + fi } function clean_service_on_sysvinit() { @@ -240,7 +320,19 @@ function clean_service_on_systemd() { ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} -} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + + if systemctl is-active --quiet nginxd; then + echo "Nginx for TDengine is running, stopping it..." + ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi +} # taos:2345:respawn:/etc/init.d/taosd start @@ -269,6 +361,36 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/nginxd.service" + ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}" + ${csudo} bash -c "echo >> ${nginx_service_config}" + ${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}" + ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}" + if ! 
${csudo} systemctl enable nginxd &> /dev/null; then + ${csudo} systemctl daemon-reexec + ${csudo} systemctl enable nginxd + fi + ${csudo} systemctl start nginxd + fi } function install_service() { @@ -363,6 +485,21 @@ function update_TDengine() { install_bin install_service install_config + + if [ "$verMode" == "cluster" ]; then + # Check if openresty is installed + openresty_work=false + + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is updated successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" + fi + fi + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -376,7 +513,15 @@ function update_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" fi - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -416,6 +561,20 @@ function install_TDengine() { # For installing new install_bin install_service + + if [ "$verMode" == "cluster" ]; then + openresty_work=false + # Check if nginx is installed successfully + if type curl &> /dev/null; then + if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then + echo -e "\033[44;32;1mNginx for TDengine is installed successfully!${NC}" + openresty_work=true + else + echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m" + fi + fi + fi + install_config # Ask if to start the service @@ -430,8 +589,17 @@ function install_TDengine() { else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" fi - - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + + if [ "$verMode" == "cluster" ]; then + if [ ${openresty_work} = 'true' ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + fi + echo echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" else # Only install client @@ -450,6 +618,7 @@ function install_TDengine() { if [ -z $1 ]; then # Install server and client if [ -x ${bin_dir}/taosd ]; then + update_flag=1 update_TDengine else install_TDengine @@ -457,6 +626,7 @@ if [ -z $1 ]; then else # Only install client if [ -x ${bin_dir}/taos ]; then + update_flag=1 update_TDengine client else install_TDengine client diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh index 78d7b75045..c5ecf5e5b9 100755 --- a/packaging/tools/install_client.sh +++ b/packaging/tools/install_client.sh @@ -7,18 +7,35 @@ set -e #set -x # -----------------------Variables definition--------------------- -script_dir=$(dirname $(readlink -f "$0")) -# Dynamic directory -data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +osType=Linux + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) + # Dynamic directory + data_dir="/var/lib/taos" + log_dir="/var/log/taos" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + data_dir="/var/lib/taos" + log_dir="~/TDengineLog" +fi log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -26,6 +43,8 @@ install_main_dir="/usr/local/taos" # old bin dir bin_dir="/usr/local/taos/bin" +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" # Color setting RED='\033[0;31m' @@ -51,9 +70,9 @@ function kill_client() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -61,51 +80,60 @@ function install_main_path() { } function install_bin() { - # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : + # Remove links + ${csudo} rm -f ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + ${csudo} rm -f ${bin_link_dir}/rmtaos || : - ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 
${install_main_dir}/bin/* + ${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link - [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : + if [ "$osType" == "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + fi + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : } function clean_lib() { - sudo rm -f /usr/lib/libtaos.so || : + sudo rm -f /usr/lib/libtaos.* || : sudo rm -rf ${lib_dir} || : } function install_lib() { # Remove links ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - - ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + + if [ "$osType" != "Darwin" ]; then + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg } @@ -113,8 +141,12 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + fi ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -142,7 +174,7 @@ function update_TDengine() { kill_client sleep 1 fi - + install_main_path install_log @@ -152,7 +184,7 @@ function update_TDengine() { install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is updated successfully!${NC}" @@ -168,16 +200,16 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine client...${NC}" - - install_main_path - install_log + + install_main_path + install_log install_header install_lib install_connector install_examples install_bin install_config - + echo echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" @@ -191,8 +223,8 @@ function install_TDengine() { if [ -e ${bin_dir}/taosd ]; then echo -e "\033[44;32;1mThere are already installed TDengine server, so don't need install client!${NC}" exit 0 - fi - + fi + if [ -x ${bin_dir}/taos ]; then update_flag=1 update_TDengine diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 25ab397775..2200c7f13d 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -9,19 +9,37 @@ set -e # -----------------------Variables definition--------------------- source_dir=$1 binary_dir=$2 -script_dir=$(dirname $(readlink -f "$0")) +osType=$3 + +if [ "$osType" != "Darwin" ]; then + script_dir=$(dirname $(readlink -f "$0")) +else + script_dir=${source_dir}/packaging/tools +fi + # Dynamic directory data_dir="/var/lib/taos" -log_dir="/var/log/taos" + +if [ "$osType" != "Darwin" ]; then + log_dir="/var/log/taos" +else + log_dir="~/TDengineLog" +fi data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_install_dir="/etc/taos" -bin_link_dir="/usr/bin" -lib_link_dir="/usr/lib" -inc_link_dir="/usr/include" +if [ "$osType" != "Darwin" ]; then + bin_link_dir="/usr/bin" + lib_link_dir="/usr/lib" + inc_link_dir="/usr/include" +else + bin_link_dir="/usr/local/bin" + lib_link_dir="/usr/local/lib" + inc_link_dir="/usr/local/include" +fi #install main path install_main_dir="/usr/local/taos" @@ -43,58 +61,61 @@ if command -v sudo > /dev/null; then csudo="sudo" fi -initd_mod=0 -service_mod=2 -if pidof systemd &> /dev/null; then - service_mod=0 -elif $(which service &> /dev/null); then - service_mod=1 - service_config_dir="/etc/init.d" - if $(which chkconfig &> /dev/null); then - initd_mod=1 - elif $(which insserv &> /dev/null); then - initd_mod=2 - elif $(which update-rc.d &> /dev/null); then - initd_mod=3 +if [ "$osType" != "Darwin" ]; then + + initd_mod=0 + service_mod=2 + if pidof systemd &> /dev/null; then + service_mod=0 + elif $(which service &> /dev/null); then + service_mod=1 + service_config_dir="/etc/init.d" + if $(which chkconfig &> /dev/null); then + initd_mod=1 + elif $(which insserv &> /dev/null); 
then + initd_mod=2 + elif $(which update-rc.d &> /dev/null); then + initd_mod=3 + else + service_mod=2 + fi else service_mod=2 fi -else - service_mod=2 -fi - -# get the operating system type for using the corresponding init file -# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification -#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) -osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) -#echo "osinfo: ${osinfo}" -os_type=0 -if echo $osinfo | grep -qwi "ubuntu" ; then - echo "this is ubuntu system" - os_type=1 -elif echo $osinfo | grep -qwi "debian" ; then - echo "this is debian system" - os_type=1 -elif echo $osinfo | grep -qwi "Kylin" ; then - echo "this is Kylin system" - os_type=1 -elif echo $osinfo | grep -qwi "centos" ; then - echo "this is centos system" - os_type=2 -elif echo $osinfo | grep -qwi "fedora" ; then - echo "this is fedora system" - os_type=2 -else - echo "this is other linux system" - os_type=0 + # get the operating system type for using the corresponding init file + # ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification + #osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) + osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2) + #echo "osinfo: ${osinfo}" + os_type=0 + if echo $osinfo | grep -qwi "ubuntu" ; then + echo "this is ubuntu system" + os_type=1 + elif echo $osinfo | grep -qwi "debian" ; then + echo "this is debian system" + os_type=1 + elif echo $osinfo | grep -qwi "Kylin" ; then + echo "this is Kylin system" + os_type=1 + elif echo $osinfo | grep -qwi "centos" ; then + echo "this is centos system" + os_type=2 + elif echo $osinfo | grep -qwi "fedora" ; then + echo "this is fedora system" + os_type=2 + else + echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, " + echo "please feel free to contact taosdata.com for support." 
+ os_type=1 + fi fi function kill_taosd() { - pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') - if [ -n "$pid" ]; then - ${csudo} kill -9 $pid || : - fi + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo} kill -9 $pid || : + fi } function install_main_path() { @@ -107,37 +128,62 @@ function install_main_path() { ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples ${csudo} mkdir -p ${install_main_dir}/include - ${csudo} mkdir -p ${install_main_dir}/init.d + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${install_main_dir}/init.d + fi } function install_bin() { # Remove links - ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : - ${csudo} rm -f ${bin_link_dir}/rmtaos || : + ${csudo} rm -f ${bin_link_dir}/taos || : - ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin - ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + if [ "$osType" != "Darwin" ]; then + ${csudo} rm -f ${bin_link_dir}/taosd || : + ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/taosdump || : + fi + + ${csudo} rm -f ${bin_link_dir}/rmtaos || : + + ${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin + + if [ "$osType" != "Darwin" ]; then + ${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin + else + ${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin + fi ${csudo} chmod 0555 ${install_main_dir}/bin/* #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : - [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : - [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : + [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : + [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + fi + + if [ "$osType" != "Darwin" ]; then + [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : + else + [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : + fi } function install_lib() { # Remove links - ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -f ${lib_link_dir}/libtaos.* || : versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c) - ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 - ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + if [ "$osType" != "Darwin" ]; then + ${csudo} cp 
${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1 + ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so + else + ${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib + ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib + fi } function install_header() { @@ -163,8 +209,13 @@ function install_config() { function install_log() { ${csudo} rm -rf ${log_dir} || : - ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + + if [ "$osType" != "Darwin" ]; then + ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} + else + mkdir -p ${log_dir} && chmod 777 ${log_dir} + fi + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } @@ -291,7 +342,9 @@ function install_service() { function update_TDengine() { echo -e "${GREEN}Start to update TDEngine...${NC}" # Stop the service if running - if pidof taosd &> /dev/null; then + + if [ "$osType" != "Darwin" ]; then + if pidof taosd &> /dev/null; then if ((${service_mod}==0)); then ${csudo} systemctl stop taosd || : elif ((${service_mod}==1)); then @@ -300,6 +353,7 @@ function update_TDengine() { kill_taosd fi sleep 1 + fi fi install_main_path @@ -310,32 +364,54 @@ function update_TDengine() { install_connector install_examples install_bin - install_service + + if [ "$osType" != "Darwin" ]; then + install_service + fi + install_config - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" - else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" - fi + if [ "$osType" != "Darwin" ]; then + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" + else + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + echo + + echo -e "${GREEN_DARK}To access TDengine Client ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}" + fi } function install_TDengine() { # Start to install - echo -e "${GREEN}Start to install TDEngine...${NC}" - + if [ "$osType" != "Darwin" ]; then + echo -e "${GREEN}Start to install TDEngine...${NC}" + else + echo -e "${GREEN}Start to 
install TDEngine Client ...${NC}" + fi + install_main_path - install_data + + if [ "$osType" != "Darwin" ]; then + install_data + fi install_log install_header install_lib @@ -343,30 +419,41 @@ function install_TDengine() { install_examples install_bin - install_service - install_config - # Ask if to start the service - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo - echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" - elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" - else - echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + + if [ "$osType" != "Darwin" ]; then + install_service fi - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - echo - echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + install_config + + if [ "$osType" != "Darwin" ]; then + # Ask if to start the service + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + echo + echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" + if ((${service_mod}==0)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + elif ((${service_mod}==1)); then + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + else + echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" + fi + + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo + echo -e "\033[44;32;1mTDengine Client is installed successfully!${NC}" + fi } ## ==============================Main program starts from here============================ echo source directory: $1 echo binary directory: $2 -if [ -x ${bin_dir}/taosd ]; then +if [ -x ${bin_dir}/taos ]; then update_TDengine else install_TDengine diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d206359160..34dacbc9f3 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -13,8 +13,15 @@ osType=$5 verMode=$6 verType=$7 -script_dir="$(dirname $(readlink -f $0))" -top_dir="$(readlink -f ${script_dir}/../..)" +if [ "$osType" != "Darwin" ]; then + script_dir="$(dirname $(readlink -f $0))" + top_dir="$(readlink -f ${script_dir}/../..)" +else + script_dir=`dirname $0` + cd ${script_dir} + script_dir="$(pwd)" + top_dir=${script_dir}/../.. +fi # create compressed install file. build_dir="${compile_dir}/build" @@ -22,13 +29,26 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-client" + +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-client" +else + install_dir="${release_dir}/TDengine-client" +fi # Directories and files. 
-bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" -lib_files="${build_dir}/lib/libtaos.so.${version}" + +if [ "$osType" != "Darwin" ]; then + bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh" + lib_files="${build_dir}/lib/libtaos.so.${version}" +else + bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh" + lib_files="${build_dir}/lib/libtaos.${version}.dylib" +fi + header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" + install_files="${script_dir}/install_client.sh" # make directories. @@ -38,10 +58,23 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : + +if [ "$osType" != "Darwin" ]; then + tar -zcv -f taos.tar.gz * --remove-files || : +else + tar -zcv -f taos.tar.gz * || : + mv taos.tar.gz .. + rm -rf ./* + mv ../taos.tar.gz . +fi cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$osType" == "Darwin" ]; then + sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh + mv install_client_temp.sh ${install_dir}/install_client.sh +fi +chmod a+x ${install_dir}/install_client.sh # Copy example code mkdir -p ${install_dir}/examples @@ -60,7 +93,10 @@ cp ${lib_files} ${install_dir}/driver # Copy connector connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector -cp ${build_dir}/lib/*.jar ${install_dir}/connector + +if [ "$osType" != "Darwin" ]; then + cp ${build_dir}/lib/*.jar ${install_dir}/connector +fi cp -r ${connector_dir}/grafana ${install_dir}/connector/ cp -r ${connector_dir}/python ${install_dir}/connector/ cp -r ${connector_dir}/go ${install_dir}/connector @@ -90,6 +126,13 @@ else exit 1 fi -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files +if [ "$osType" != "Darwin" ]; then + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : +else + tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || : + mv "$(basename ${pkg_name}).tar.gz" .. + rm -rf ./* + mv ../"$(basename ${pkg_name}).tar.gz" . +fi cd ${curr_dir} diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index a16da7f6c3..e0cd65a00e 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -23,7 +23,11 @@ code_dir="${top_dir}/src" release_dir="${top_dir}/release" #package_name='linux' -install_dir="${release_dir}/TDengine-server" +if [ "$verMode" == "cluster" ]; then + install_dir="${release_dir}/TDengine-enterprise-server" +else + install_dir="${release_dir}/TDengine-server" +fi # Directories and files. 
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh" @@ -31,6 +35,7 @@ lib_files="${build_dir}/lib/libtaos.so.${version}" header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h" cfg_dir="${top_dir}/packaging/cfg" install_files="${script_dir}/install.sh" +nginx_dir="${code_dir}/../../enterprise/src/modules/web" # Init file #init_dir=${script_dir}/deb @@ -50,11 +55,29 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $ mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm +if [ "$verMode" == "cluster" ]; then + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd + cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png + rm -rf ${install_dir}/nginxd/png + + if [ "$cpuType" == "aarch64" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/ + elif [ "$cpuType" == "aarch32" ]; then + cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/ + fi + rm -rf ${install_dir}/nginxd/sbin/arm +fi + cd ${install_dir} -tar -zcv -f taos.tar.gz * --remove-files || : +tar -zcv -f taos.tar.gz * --remove-files || : cd ${curr_dir} -cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install* +cp ${install_files} ${install_dir} +if [ "$verMode" == "cluster" ]; then + sed 's/verMode=lite/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh + mv install_temp.sh ${install_dir}/install.sh +fi +chmod a+x ${install_dir}/install.sh # Copy example code mkdir -p ${install_dir}/examples @@ -103,6 +126,6 @@ else exit 1 fi -tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files +tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || : -cd ${curr_dir} +cd ${curr_dir} \ No newline at end of file diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index 3c9fd6bf7f..81507e1aa9 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -2,6 +2,11 @@ # # Script to stop the service and uninstall TDengine, but retain the config, data and log files. +set -e +#set -x + +verMode=lite + RED='\033[0;31m' GREEN='\033[1;32m' NC='\033[0m' @@ -14,10 +19,14 @@ cfg_link_dir="/usr/local/taos/cfg" bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" +install_nginxd_dir="/usr/local/nginxd" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" service_config_dir="/etc/systemd/system" taos_service_name="taosd" - +nginx_service_name="nginxd" csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -62,6 +71,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { @@ -90,6 +100,20 @@ function clean_service_on_systemd() { ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} + + if [ "$verMode" == "cluster" ]; then + nginx_service_config="${service_config_dir}/${nginx_service_name}.service" + + if [ -d ${bin_dir}/web ]; then + if systemctl is-active --quiet ${nginx_service_name}; then + echo "Nginx for TDengine is running, stopping it..." 
+ ${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null + fi + ${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null + + ${csudo} rm -f ${nginx_service_config} + fi + fi } function clean_service_on_sysvinit() { @@ -143,6 +167,7 @@ clean_config ${csudo} rm -rf ${data_link_dir} || : ${csudo} rm -rf ${install_main_dir} +${csudo} rm -rf ${install_nginxd_dir} osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release) if echo $osinfo | grep -qwi "ubuntu" ; then diff --git a/packaging/tools/remove_client.sh b/packaging/tools/remove_client.sh index 206de34c1f..9210546a9f 100755 --- a/packaging/tools/remove_client.sh +++ b/packaging/tools/remove_client.sh @@ -17,6 +17,10 @@ bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" + +# v1.5 jar dir +v15_java_app_dir="/usr/local/lib/taos" + csudo="" if command -v sudo > /dev/null; then csudo="sudo" @@ -39,6 +43,7 @@ function clean_bin() { function clean_lib() { # Remove link ${csudo} rm -f ${lib_link_dir}/libtaos.* || : + ${csudo} rm -rf ${v15_java_app_dir} || : } function clean_header() { diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 12c01ba75a..92d6b61eb2 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -24,20 +24,10 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) #set version of .so #VERSION so version #SOVERSION api version - IF (TD_LITE) - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build lite version ${VERSION_INFO}") - ELSE () - execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c - OUTPUT_VARIABLE - VERSION_INFO) - MESSAGE(STATUS "build cluster version ${VERSION_INFO}") - ENDIF () - + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) MESSAGE(STATUS "build version ${VERSION_INFO}") SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) @@ -57,6 +47,7 @@ ELSEIF (TD_WINDOWS_64) TARGET_LINK_LIBRARIES(taos trpc) ELSEIF (TD_DARWIN_64) + SET(CMAKE_MACOSX_RPATH 1) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux) ADD_LIBRARY(taos_static STATIC ${SRC}) @@ -66,6 +57,17 @@ ELSEIF (TD_DARWIN_64) # generate dynamic library (*.dylib) ADD_LIBRARY(taos SHARED ${SRC}) TARGET_LINK_LIBRARIES(taos trpc tutil pthread m) - + + SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1) + + #set version of .so + #VERSION so version + #SOVERSION api version + execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh) + execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c + OUTPUT_VARIABLE + VERSION_INFO) + MESSAGE(STATUS "build version ${VERSION_INFO}") + SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1) ENDIF () diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 9116a7c60b..14341bbbe7 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -336,6 +336,7 @@ typedef struct { int rspType; int rspLen; uint64_t qhandle; + 
int64_t uid; int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int row; @@ -382,6 +383,7 @@ typedef struct _sql_obj { uint32_t queryId; void * thandle; void * pStream; + void * pSubscription; char * sqlstr; char retry; char maxRetry; @@ -465,6 +467,12 @@ void tscDestroyResPointerInfo(SSqlRes *pRes); void tscFreeSqlCmdData(SSqlCmd *pCmd); +/** + * free query result of the sql object + * @param pObj + */ +void tscFreeSqlResult(SSqlObj* pSql); + /** * only free part of resources allocated during query. * Note: this function is multi-thread safe. diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h index 958252b4de..8dbe63d75a 100644 --- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h +++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h @@ -135,7 +135,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm * Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JI)J */ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp - (JNIEnv *, jobject, jstring, jstring, jstring, jstring, jstring, jlong, jint); + (JNIEnv *, jobject, jlong, jboolean, jstring, jstring, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -143,7 +143,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp * Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData; */ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jint); /* * Class: com_taosdata_jdbc_TSDBJNIConnector @@ -151,7 +151,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp * Signature: (J)V */ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp - (JNIEnv *, jobject, jlong); + (JNIEnv *, jobject, jlong, jboolean); /* * Class: com_taosdata_jdbc_TSDBJNIConnector diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 9cec4f4b0f..228403c79d 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -20,6 +20,7 @@ #include "tscJoinProcess.h" #include "tsclient.h" #include "tscUtil.h" +#include "ttime.h" int __init = 0; @@ -514,92 +515,42 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm } } -JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jstring jhost, - jstring juser, jstring jpass, jstring jdb, - jstring jtable, jlong jtime, - jint jperiod) { - TAOS_SUB *tsub; - jlong sub = 0; - char * host = NULL; - char * user = NULL; - char * pass = NULL; - char * db = NULL; - char * table = NULL; - int64_t time = 0; - int period = 0; +JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con, + jboolean restart, jstring jtopic, jstring jsql, jint jinterval) { + jlong sub = 0; + TAOS *taos = (TAOS *)con; + char *topic = NULL; + char *sql = NULL; jniGetGlobalMethod(env); jniTrace("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj); - if (jhost != NULL) { - host = (char *)(*env)->GetStringUTFChars(env, jhost, NULL); + if (jtopic != NULL) { + topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL); } - if (juser != NULL) { - user = (char *)(*env)->GetStringUTFChars(env, juser, NULL); - } - if (jpass != NULL) { - pass = (char *)(*env)->GetStringUTFChars(env, jpass, NULL); - } - if (jdb 
!= NULL) { - db = (char *)(*env)->GetStringUTFChars(env, jdb, NULL); - } - if (jtable != NULL) { - table = (char *)(*env)->GetStringUTFChars(env, jtable, NULL); - } - time = (int64_t)jtime; - period = (int)jperiod; - - if (user == NULL) { - jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj); - user = tsDefaultUser; - } - if (pass == NULL) { - jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj); - pass = tsDefaultPass; + if (jsql != NULL) { + sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL); } - jniTrace("jobj:%p, host:%s, user:%s, pass:%s, db:%s, table:%s, time:%d, period:%d", jobj, host, user, pass, db, table, - time, period); - tsub = taos_subscribe(host, user, pass, db, table, time, period); + TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval); sub = (jlong)tsub; if (sub == 0) { - jniTrace("jobj:%p, failed to subscribe to db:%s, table:%s", jobj, db, table); + jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, jtopic); } else { - jniTrace("jobj:%p, successfully subscribe to db:%s, table:%s, sub:%ld, tsub:%p", jobj, db, table, sub, tsub); + jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, jtopic); } - if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); - if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user); - if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass); - if (db != NULL) (*env)->ReleaseStringUTFChars(env, jdb, db); - if (table != NULL) (*env)->ReleaseStringUTFChars(env, jtable, table); + if (topic != NULL) (*env)->ReleaseStringUTFChars(env, jtopic, topic); + if (sql != NULL) (*env)->ReleaseStringUTFChars(env, jsql, sql); return sub; } -JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) { - jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); - - TAOS_SUB * tsub = (TAOS_SUB *)sub; - TAOS_ROW row = taos_consume(tsub); - TAOS_FIELD *fields = taos_fetch_subfields(tsub); - int num_fields = taos_subfields_count(tsub); - - jniGetGlobalMethod(env); - - jniTrace("jobj:%p, check fields:%p, num_fields=%d", jobj, fields, num_fields); - +static jobject convert_one_row(JNIEnv *env, TAOS_ROW row, TAOS_FIELD* fields, int num_fields) { jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields); jniTrace("created a rowdata object, rowobj:%p", rowobj); - if (row == NULL) { - jniTrace("jobj:%p, tsub:%p, fields size is %d, fetch row to the end", jobj, tsub, num_fields); - return NULL; - } - - char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; - for (int i = 0; i < num_fields; i++) { if (row[i] == NULL) { continue; @@ -634,6 +585,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI } break; case TSDB_DATA_TYPE_BINARY: { + char tmp[TSDB_MAX_BYTES_PER_ROW] = {0}; strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist (*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp)); @@ -642,7 +594,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI } case TSDB_DATA_TYPE_NCHAR: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i, - jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); + jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes)); break; case TSDB_DATA_TYPE_TIMESTAMP: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i])); @@ 
-651,13 +603,56 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI break; } } - jniTrace("jobj:%p, rowdata retrieved, rowobj:%p", jobj, rowobj); return rowobj; } -JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub) { +JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub, jint timeout) { + jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub); + jniGetGlobalMethod(env); + TAOS_SUB *tsub = (TAOS_SUB *)sub; - taos_unsubscribe(tsub); + jobject rows = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp); + + int64_t start = taosGetTimestampMs(); + int count = 0; + + while (true) { + TAOS_RES * res = taos_consume(tsub); + if (res == NULL) { + jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub); + return NULL; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + int num_fields = taos_num_fields(res); + while (true) { + TAOS_ROW row = taos_fetch_row(res); + if (row == NULL) { + break; + } + jobject rowobj = convert_one_row(env, row, fields, num_fields); + (*env)->CallBooleanMethod(env, rows, g_arrayListAddFp, rowobj); + count++; + } + + if (count > 0) { + break; + } + if (timeout == -1) { + continue; + } + if (((int)(taosGetTimestampMs() - start)) >= timeout) { + jniTrace("jobj:%p, sub:%ld, timeout", jobj, sub); + break; + } + } + + return rows; +} + +JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, jboolean keepProgress) { + TAOS_SUB *tsub = (TAOS_SUB *)sub; + taos_unsubscribe(tsub, keepProgress); } JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj, diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index 9f12f8c1ae..856f948d1f 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -833,7 +833,7 @@ void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char tSQLSyntaxNode *pRight = pExprs->pRight; /* the left output has result from the left child syntax tree */ - char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows); + char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows); if (pLeft->nodeType == TSQL_NODE_EXPR) { tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock); } diff --git a/src/client/src/tscCache.c b/src/client/src/tscCache.c index 1ac32d7502..666d069a58 100644 --- a/src/client/src/tscCache.c +++ b/src/client/src/tscCache.c @@ -96,11 +96,7 @@ void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port, pObj = (SConnCache *)handle; if (pObj == NULL || pObj->maxSessions == 0) return NULL; -#ifdef CLUSTER - if (data == NULL || ip == 0) { -#else if (data == NULL) { -#endif tscTrace("data:%p ip:%p:%d not valid, not added in cache", data, ip, port); return NULL; } diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index a94b308e87..d49d6d7512 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -793,7 +793,9 @@ STSBuf* tsBufCreate(bool autoDelete) { return NULL; } - allocResForTSBuf(pTSBuf); + if (NULL == allocResForTSBuf(pTSBuf)) { + return NULL; + } // update the header info STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSQL_SO_ASC}; diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 
f5925b61cd..a7a774b3a8 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -202,10 +202,10 @@ void tscKillStream(STscObj *pObj, uint32_t killId) { tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId); } - taos_close_stream(pStream); if (pStream->callback) { pStream->callback(pStream->param); } + taos_close_stream(pStream); } char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) { @@ -285,8 +285,9 @@ void tscKillConnection(STscObj *pObj) { SSqlStream *pStream = pObj->streamList; while (pStream) { + SSqlStream *tmp = pStream->next; taos_close_stream(pStream); - pStream = pStream->next; + pStream = tmp; } pthread_mutex_unlock(&pObj->mutex); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 70620de329..c9c628201e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2918,7 +2918,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumnBase* pColumn) { } int32_t size = pColumn->numOfFilters + 1; - char* tmp = realloc(pColumn->filterInfo, sizeof(SColumnFilterInfo) * (size)); + char* tmp = (char*)realloc((void*)(pColumn->filterInfo), sizeof(SColumnFilterInfo) * (size)); if (tmp != NULL) { pColumn->filterInfo = (SColumnFilterInfo*)tmp; } diff --git a/src/client/src/tscSQLParserImpl.c b/src/client/src/tscSQLParserImpl.c index cc4375fb03..f7423f3a73 100644 --- a/src/client/src/tscSQLParserImpl.c +++ b/src/client/src/tscSQLParserImpl.c @@ -706,14 +706,14 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) { pInfo->sqlType = type; if (nParam == 0) return; - if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); + if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL)); va_list va; va_start(va, nParam); while (nParam-- > 0) { SSQLToken *pToken = va_arg(va, SSQLToken *); - tTokenListAppend(pInfo->pDCLInfo, pToken); + (void)tTokenListAppend(pInfo->pDCLInfo, pToken); } va_end(va); } diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c index f848643fc1..06e2638666 100644 --- a/src/client/src/tscSecondaryMerge.c +++ b/src/client/src/tscSecondaryMerge.c @@ -212,7 +212,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd tscTrace("%p load data from disk into memory, orderOfVnode:%d, total:%d", pSqlObjAddr, i + 1, idx + 1); tExtMemBufferLoadData(pMemBuffer[i], &(pDS->filePage), j, 0); #ifdef _DEBUG_VIEW - printf("load data page into mem for build loser tree: %ld rows\n", pDS->filePage.numOfElems); + printf("load data page into mem for build loser tree: %" PRIu64 " rows\n", pDS->filePage.numOfElems); SSrcColumnInfo colInfo[256] = {0}; tscGetSrcColumnInfo(colInfo, pCmd); @@ -342,7 +342,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor } #ifdef _DEBUG_VIEW - printf("%ld rows data flushed to disk after been sorted:\n", pPage->numOfElems); + printf("%" PRIu64 " rows data flushed to disk after been sorted:\n", pPage->numOfElems); tColModelDisplay(pDesc->pSchema, pPage->data, pPage->numOfElems, pPage->numOfElems); #endif @@ -601,7 +601,9 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr rlen += pExpr->resBytes; } - int32_t capacity = nBufferSizes / rlen; + int32_t capacity = 0; + if (0 != rlen) capacity = nBufferSizes / rlen; + pModel = tColModelCreate(pSchema, pCmd->fieldsInfo.numOfOutputCols, capacity); for (int32_t i = 0; i < pMeterMetaInfo->pMetricMeta->numOfVnodes; ++i) { diff --git 
a/src/client/src/tscServer.c b/src/client/src/tscServer.c index f741fc8620..85915f780d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -31,33 +31,66 @@ #define TSC_MGMT_VNODE 999 -#ifdef CLUSTER - SIpStrList tscMgmtIpList; - int tsMasterIndex = 0; - int tsSlaveIndex = 1; -#else - int tsMasterIndex = 0; - int tsSlaveIndex = 0; // slave == master for single node edition - uint32_t tsServerIp; -#endif +SIpStrList tscMgmtIpList; +int tsMasterIndex = 0; +int tsSlaveIndex = 1; int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql); int (*tscProcessMsgRsp[TSDB_SQL_MAX])(SSqlObj *pSql); void (*tscUpdateVnodeMsg[TSDB_SQL_MAX])(SSqlObj *pSql, char *buf); void tscProcessActivityTimer(void *handle, void *tmrId); int tscKeepConn[TSDB_SQL_MAX] = {0}; +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid); +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts); +void tscSaveSubscriptionProgress(void* sub); static int32_t minMsgSize() { return tsRpcHeadSize + sizeof(STaosDigest); } -#ifdef CLUSTER void tscPrintMgmtIp() { if (tscMgmtIpList.numOfIps <= 0) { - tscError("invalid IP list:%d", tscMgmtIpList.numOfIps); + tscError("invalid mgmt IP list:%d", tscMgmtIpList.numOfIps); } else { - for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + for (int i = 0; i < tscMgmtIpList.numOfIps; ++i) { + tscTrace("mgmt index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); + } + } +} + +void tscSetMgmtIpListFromCluster(SIpList *pIpList) { + tscMgmtIpList.numOfIps = pIpList->numOfIps; + if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { + for (int i = 0; i < pIpList->numOfIps; ++i) { + tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); + tscMgmtIpList.ip[i] = pIpList->ip[i]; + } + tscTrace("cluster mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpListFromEdge() { + if (tscMgmtIpList.numOfIps != 2) { + tscMgmtIpList.numOfIps = 2; + strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); + tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[1], tsMasterIp); + tscMgmtIpList.ip[1] = inet_addr(tsMasterIp); + tscTrace("edge mgmt IP list:"); + tscPrintMgmtIp(); + } +} + +void tscSetMgmtIpList(SIpList *pIpList) { + /* + * The iplist returned by the cluster edition is the current management nodes + * and the iplist returned by the edge edition is empty + */ + if (pIpList->numOfIps != 0) { + tscSetMgmtIpListFromCluster(pIpList); + } else { + tscSetMgmtIpListFromEdge(); } } -#endif /* * For each management node, try twice at least in case of poor network situation. 
@@ -68,11 +101,7 @@ void tscPrintMgmtIp() { */ static int32_t tscGetMgmtConnMaxRetryTimes() { int32_t factor = 2; -#ifdef CLUSTER return tscMgmtIpList.numOfIps * factor; -#else - return 1*factor; -#endif } void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { @@ -88,18 +117,9 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { if (code == 0) { SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; -#ifdef CLUSTER SIpList * pIpList = &pRsp->ipList; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - if (memcmp(tscMgmtIpList.ip, pIpList->ip, pIpList->numOfIps * 4) != 0) { - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } - tscTrace("new mgmt IP list:"); - tscPrintMgmtIp(); - } -#endif + tscSetMgmtIpList(pIpList); + if (pRsp->killConnection) { tscKillConnection(pObj); } else { @@ -152,19 +172,12 @@ void tscProcessActivityTimer(void *handle, void *tmrId) { void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { STscObj *pTscObj = pSql->pTscObj; -#ifdef CLUSTER if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { *pCode = 0; pSql->retry++; pSql->index = pSql->index % tscMgmtIpList.numOfIps; if (pSql->cmd.command > TSDB_SQL_READ && pSql->index == 0) pSql->index = 1; void *thandle = taosGetConnFromCache(tscConnCache, tscMgmtIpList.ip[pSql->index], TSC_MGMT_VNODE, pTscObj->user); -#else - if (pSql->retry < tscGetMgmtConnMaxRetryTimes()) { - *pCode = 0; - pSql->retry++; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, TSC_MGMT_VNODE, pTscObj->user); -#endif if (thandle == NULL) { SRpcConnInit connInit; @@ -180,24 +193,15 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) { connInit.encrypt = 0; connInit.secret = pSql->pTscObj->pass; -#ifdef CLUSTER connInit.peerIp = tscMgmtIpList.ipstr[pSql->index]; -#else - connInit.peerIp = tsMasterIp; -#endif thandle = taosOpenRpcConn(&connInit, pCode); } pSql->thandle = thandle; -#ifdef CLUSTER pSql->ip = tscMgmtIpList.ip[pSql->index]; pSql->vnode = TSC_MGMT_VNODE; tscTrace("%p mgmt index:%d ip:0x%x is picked up, pConn:%p", pSql, pSql->index, tscMgmtIpList.ip[pSql->index], pSql->thandle); -#else - pSql->ip = tsServerIp; - pSql->vnode = TSC_MGMT_VNODE; -#endif } // the pSql->res.code is the previous error(status) code. 
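The hunks above fold the former #ifdef CLUSTER branches into one code path: tscSetMgmtIpList() keeps the server-supplied management IP list when it is non-empty (cluster edition) and otherwise falls back to the configured master IP listed twice (edge edition), and tscGetMgmtConnMaxRetryTimes() now sizes the retry budget from that list, two attempts per listed node. The standalone sketch below only mirrors that rule for illustration; every name and constant in it is local to the sketch and is not part of the patch.

```c
/* Minimal sketch of the IP-list dispatch and retry budget described above (not patch code). */
#include <stdio.h>
#include <string.h>

#define MAX_MGMT_IPS 4
#define IP_STR_LEN   16

typedef struct {
  int  numOfIps;
  char ipstr[MAX_MGMT_IPS][IP_STR_LEN];
} MgmtIpList;

/* A non-empty list comes from a cluster response; an empty one means edge edition,
 * so fall back to the configured master IP, listed twice. */
static void setMgmtIpList(MgmtIpList *dst, char ips[][IP_STR_LEN], int n, const char *masterIp) {
  memset(dst, 0, sizeof(*dst));
  if (n != 0) {
    dst->numOfIps = (n < MAX_MGMT_IPS) ? n : MAX_MGMT_IPS;
    for (int i = 0; i < dst->numOfIps; ++i) strncpy(dst->ipstr[i], ips[i], IP_STR_LEN - 1);
  } else {
    dst->numOfIps = 2;
    strncpy(dst->ipstr[0], masterIp, IP_STR_LEN - 1);
    strncpy(dst->ipstr[1], masterIp, IP_STR_LEN - 1);
  }
}

/* Same arithmetic as tscGetMgmtConnMaxRetryTimes(): two attempts per management node. */
static int maxMgmtRetryTimes(const MgmtIpList *list) { return list->numOfIps * 2; }

int main(void) {
  MgmtIpList list;
  setMgmtIpList(&list, NULL, 0, "192.168.0.1");                /* edge: empty list from server */
  printf("edge retry budget: %d\n", maxMgmtRetryTimes(&list)); /* 2 nodes * 2 = 4 attempts */
  return 0;
}
```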
@@ -242,11 +246,16 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { while (pSql->retry < pSql->maxRetry) { (pSql->retry)++; -#ifdef CLUSTER char ipstr[40] = {0}; if (pVPeersDesc[pSql->index].ip == 0) { - (pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT; - continue; + /* + * in the edge edition, ip is 0, and at this time we use masterIp instead + * in the cluster edition, ip is vnode ip + */ + //(pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT; + //continue; + + pVPeersDesc[pSql->index].ip = tscMgmtIpList.ip[0]; } *pCode = TSDB_CODE_SUCCESS; @@ -276,31 +285,6 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { pSql->vnode = pVPeersDesc[pSql->index].vnode; tscTrace("%p vnode:%d ip:%p index:%d is picked up, pConn:%p", pSql, pVPeersDesc[pSql->index].vnode, pVPeersDesc[pSql->index].ip, pSql->index, pSql->thandle); -#else - *pCode = 0; - void *thandle = taosGetConnFromCache(tscConnCache, tsServerIp, pVPeersDesc[0].vnode, pTscObj->user); - - if (thandle == NULL) { - SRpcConnInit connInit; - memset(&connInit, 0, sizeof(connInit)); - connInit.cid = vidIndex; - connInit.sid = 0; - connInit.spi = 0; - connInit.encrypt = 0; - connInit.meterId = pSql->pTscObj->user; - connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS)); - connInit.shandle = pVnodeConn; - connInit.ahandle = pSql; - connInit.peerIp = tsMasterIp; - connInit.peerPort = tsVnodeShellPort; - thandle = taosOpenRpcConn(&connInit, pCode); - vidIndex = (vidIndex + 1) % tscNumOfThreads; - } - - pSql->thandle = thandle; - pSql->ip = tsServerIp; - pSql->vnode = pVPeersDesc[0].vnode; -#endif break; } @@ -367,15 +351,9 @@ int tscSendMsgToServer(SSqlObj *pSql) { return code; } -#ifdef CLUSTER void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { SIpList *pIpList = (SIpList *)(cont); - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - tscTrace("Update mgmt Ip, index:%d ip:%s", i, tscMgmtIpList.ipstr[i]); - } + tscSetMgmtIpList(pIpList); if (pSql->cmd.command < TSDB_SQL_READ) { tsMasterIndex = 0; @@ -386,7 +364,6 @@ void tscProcessMgmtRedirect(SSqlObj *pSql, uint8_t *cont) { tscPrintMgmtIp(); } -#endif void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (ahandle == NULL) return NULL; @@ -420,13 +397,9 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); if (msg == NULL) { - tscTrace("%p no response from ip:0x%x", pSql, pSql->ip); - -#ifdef CLUSTER + tscTrace("%p no response from ip:%s", pSql, taosIpStr(pSql->ip)); + pSql->index++; -#else - // for single node situation, do NOT try next index -#endif pSql->thandle = NULL; // todo taos_stop_query() in async model /* @@ -442,12 +415,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { // renew meter meta in case it is changed if (pCmd->command < TSDB_SQL_FETCH && pRes->code != TSDB_CODE_QUERY_CANCELLED) { -#ifdef CLUSTER pSql->maxRetry = TSDB_VNODES_SUPPORT * 2; -#else - // for fetch, it shall not renew meter meta - pSql->maxRetry = 2; -#endif code = tscRenewMeterMeta(pSql, pMeterMetaInfo->name); pRes->code = code; if (code == TSDB_CODE_ACTION_IN_PROGRESS) return pSql; @@ -460,8 +428,6 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { } else { uint16_t rspCode = pMsg->content[0]; -#ifdef CLUSTER - if (rspCode == TSDB_CODE_REDIRECT) { tscTrace("%p it shall be 
redirected!", pSql); taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); @@ -493,12 +459,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { * removed. So, renew metermeta and try again. * not_active_session: db has been move to other node, the vnode does not exist on this dnode anymore. */ -#else - if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID || - rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID || - rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) { -#endif - pSql->thandle = NULL; + pSql->thandle = NULL; taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user); if (pCmd->command == TSDB_SQL_CONNECT) { @@ -767,12 +728,8 @@ int tscProcessSql(SSqlObj *pSql) { tscTrace("%p SQL cmd:%d will be processed, name:%s, type:%d", pSql, pSql->cmd.command, name, pSql->cmd.type); pSql->retry = 0; if (pSql->cmd.command < TSDB_SQL_MGMT) { -#ifdef CLUSTER pSql->maxRetry = TSDB_VNODES_SUPPORT; -#else - pSql->maxRetry = 2; -#endif - + // the pMeterMetaInfo cannot be NULL if (pMeterMetaInfo == NULL) { pSql->res.code = TSDB_CODE_OTHERS; @@ -820,10 +777,10 @@ int tscProcessSql(SSqlObj *pSql) { } } - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); - sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); if (pSql->numOfSubs <= 0) { pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; @@ -856,9 +813,9 @@ int tscProcessSql(SSqlObj *pSql) { } if (fp == NULL) { - sem_post(&pSql->emptyRspSem); - sem_wait(&pSql->rspSem); - sem_post(&pSql->emptyRspSem); + tsem_post(&pSql->emptyRspSem); + tsem_wait(&pSql->rspSem); + tsem_post(&pSql->emptyRspSem); // set the command flag must be after the semaphore been correctly set. pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC; @@ -1211,7 +1168,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { tColModelCompact(pDesc->pSchema, trsupport->localBuffer, pDesc->pSchema->maxCapacity); #ifdef _DEBUG_VIEW - printf("%ld rows data flushed to disk:\n", trsupport->localBuffer->numOfElems); + printf("%" PRIu64 " rows data flushed to disk:\n", trsupport->localBuffer->numOfElems); SSrcColumnInfo colInfo[256] = {0}; tscGetSrcColumnInfo(colInfo, &pPObj->cmd); tColModelDisplayEx(pDesc->pSchema, trsupport->localBuffer->data, trsupport->localBuffer->numOfElems, @@ -1387,7 +1344,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d", - trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex); + trsupport->pParentSqlObj, pSql, pSvd != NULL ? 
pSvd->vnode : -1, trsupport->subqueryIndex); pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -1411,9 +1368,14 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { tscRetrieveFromVnodeCallBack(param, tres, pState->code); } else { // success, proceed to retrieve data from dnode - tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + if (vnodeInfo != NULL) { + tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, trsupport->subqueryIndex); + } else { + tscTrace("%p sub:%p query complete, orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, + trsupport->subqueryIndex); + } taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); } @@ -1533,12 +1495,12 @@ static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vn tscTrace("%p vid:%d, query on %d meters", pSql, htons(vnodeId), numOfMeters); if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { #ifdef _DEBUG_VIEW - tscTrace("%p sid:%d, uid:%lld", pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); + tscTrace("%p sid:%d, uid:%" PRIu64, pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid); #endif SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg; pMeterInfo->sid = htonl(pMeterMeta->sid); pMeterInfo->uid = htobe64(pMeterMeta->uid); - + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pMeterMeta->uid)); pMsg += sizeof(SMeterSidExtInfo); } else { SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); @@ -1549,6 +1511,7 @@ static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vn pMeterInfo->sid = htonl(pQueryMeterInfo->sid); pMeterInfo->uid = htobe64(pQueryMeterInfo->uid); + pMeterInfo->key = htobe64(tscGetSubscriptionProgress(pSql->pSubscription, pQueryMeterInfo->uid)); pMsg += sizeof(SMeterSidExtInfo); @@ -1556,7 +1519,7 @@ static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vn pMsg += pMetricMeta->tagLen; #ifdef _DEBUG_VIEW - tscTrace("%p sid:%d, uid:%lld", pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); + tscTrace("%p sid:%d, uid:%" PRId64, pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid); #endif } } @@ -1648,7 +1611,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->nAggTimeInterval = htobe64(pCmd->nAggTimeInterval); pQueryMsg->intervalTimeUnit = pCmd->intervalTimeUnit; if (pCmd->nAggTimeInterval < 0) { - tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pCmd->nAggTimeInterval); + tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, pSql, pCmd->nAggTimeInterval); return -1; } @@ -3430,20 +3393,11 @@ int tscProcessConnectRsp(SSqlObj *pSql) { assert(len <= tListLen(pObj->db)); strncpy(pObj->db, temp, tListLen(pObj->db)); -#ifdef CLUSTER SIpList * pIpList; char *rsp = pRes->pRsp + sizeof(SConnectRsp); pIpList = (SIpList *)rsp; - tscMgmtIpList.numOfIps = pIpList->numOfIps; - for (int i = 0; i < pIpList->numOfIps; ++i) { - tinet_ntoa(tscMgmtIpList.ipstr[i], pIpList->ip[i]); - tscMgmtIpList.ip[i] = pIpList->ip[i]; - } + tscSetMgmtIpList(pIpList); - rsp += sizeof(SIpList) + sizeof(int32_t) * pIpList->numOfIps; - - tscPrintMgmtIp(); -#endif strcpy(pObj->sversion, pConnect->version); pObj->writeAuth = pConnect->writeAuth; pObj->superAuth = 
pConnect->superAuth; @@ -3542,11 +3496,27 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) { pRes->numOfRows = htonl(pRetrieve->numOfRows); pRes->precision = htons(pRetrieve->precision); pRes->offset = htobe64(pRetrieve->offset); - pRes->useconds = htobe64(pRetrieve->useconds); pRes->data = pRetrieve->data; tscSetResultPointer(pCmd, pRes); + + if (pSql->pSubscription != NULL) { + TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, pCmd->fieldsInfo.numOfOutputCols - 1); + int16_t offset = tscFieldInfoGetOffset(pCmd, pCmd->fieldsInfo.numOfOutputCols - 1); + char* p = pRes->data + (pField->bytes + offset) * pRes->numOfRows; + + int32_t numOfMeters = htonl(*(int32_t*)p); + p += sizeof(int32_t); + for (int i = 0; i < numOfMeters; i++) { + int64_t uid = htobe64(*(int64_t*)p); + p += sizeof(int64_t); + TSKEY key = htobe64(*(TSKEY*)p); + p += sizeof(TSKEY); + tscUpdateSubscriptionProgress(pSql->pSubscription, uid, key); + } + } + pRes->row = 0; /** diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 13071d284e..795c9af318 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -63,19 +63,17 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const } } -#ifdef CLUSTER if (ip && ip[0]) { + tscMgmtIpList.numOfIps = 4; + strcpy(tscMgmtIpList.ipstr[0], ip); + tscMgmtIpList.ip[0] = inet_addr(ip); strcpy(tscMgmtIpList.ipstr[1], ip); tscMgmtIpList.ip[1] = inet_addr(ip); + strcpy(tscMgmtIpList.ipstr[2], tsMasterIp); + tscMgmtIpList.ip[2] = inet_addr(tsMasterIp); + strcpy(tscMgmtIpList.ipstr[3], tsSecondIp); + tscMgmtIpList.ip[3] = inet_addr(tsSecondIp); } -#else - if (ip && ip[0]) { - if (ip != tsMasterIp) { - strcpy(tsMasterIp, ip); - } - tsServerIp = inet_addr(ip); - } -#endif pObj = (STscObj *)malloc(sizeof(STscObj)); if (NULL == pObj) { @@ -175,11 +173,6 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) { -#ifndef CLUSTER - if (ip == NULL) { - ip = tsMasterIp; - } -#endif return taos_connect_imp(ip, user, pass, db, port, fp, param, taos); } @@ -344,7 +337,7 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) { SSqlRes *pRes = &pSql->res; STscObj *pObj = pSql->pTscObj; - if (pRes->qhandle == 0 || pObj->pSql != pSql) { + if (pRes->qhandle == 0) { *rows = NULL; return 0; } @@ -694,7 +687,7 @@ int taos_select_db(TAOS *taos, const char *db) { return taos_query(taos, sql); } -void taos_free_result(TAOS_RES *res) { +void taos_free_result_imp(TAOS_RES* res, int keepCmd) { if (res == NULL) return; SSqlObj *pSql = (SSqlObj *)res; @@ -712,6 +705,8 @@ void taos_free_result(TAOS_RES *res) { pSql->thandle = NULL; tscFreeSqlObj(pSql); tscTrace("%p Async SqlObj is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); } else { tscFreeSqlObjPartial(pSql); } @@ -761,8 +756,13 @@ void taos_free_result(TAOS_RES *res) { * Then this object will be reused and no free operation is required. */ pSql->thandle = NULL; - tscFreeSqlObjPartial(pSql); - tscTrace("%p sql result is freed by app", pSql); + if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed by app while sql command is kept", pSql); + } else { + tscFreeSqlObjPartial(pSql); + tscTrace("%p sql result is freed by app", pSql); + } } } else { // if no free resource msg is sent to vnode, we free this object immediately. 
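In the tscProcessRetrieveRspFromVnode() hunk above, a retrieve response for a subscription now carries a small trailer after the column data: a 32-bit table count followed by one (uid, last timestamp key) pair per table, each fed to tscUpdateSubscriptionProgress(); the patch locates the trailer at pRes->data plus (offset + bytes of the last output column) * numOfRows. The sketch below decodes that layout as it can be inferred from the patch, with the big-endian byte order implied by its htonl/htobe64 calls; all names in it are local stand-ins rather than patch code, and the update callback stands in for tscUpdateSubscriptionProgress(). The hunks below continue the taos_free_result_imp() refactor.

```c
/* Illustrative decoder for the subscription-progress trailer (sketch only, not patch code). */
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

static uint64_t loadBE64(const unsigned char *p) {
  uint64_t v = 0;
  for (int i = 0; i < 8; ++i) v = (v << 8) | p[i];   /* big-endian to host */
  return v;
}

static uint32_t loadBE32(const unsigned char *p) {
  uint32_t v = 0;
  for (int i = 0; i < 4; ++i) v = (v << 8) | p[i];
  return v;
}

/* 'trailer' points just past the last output column of the retrieved block.
 * Layout: [int32 numOfTables] then numOfTables x ([int64 uid][int64 lastKey]). */
static const unsigned char *decodeProgressTrailer(const unsigned char *trailer,
                                                  void (*update)(int64_t uid, TSKEY key)) {
  uint32_t numOfTables = loadBE32(trailer);
  const unsigned char *p = trailer + 4;
  for (uint32_t i = 0; i < numOfTables; ++i) {
    int64_t uid = (int64_t)loadBE64(p);  p += 8;
    TSKEY   key = (TSKEY)loadBE64(p);    p += 8;
    update(uid, key);                    /* tscUpdateSubscriptionProgress() in the patch */
  }
  return p;  /* first byte after the trailer */
}

static void printProgress(int64_t uid, TSKEY key) {
  printf("table uid=%lld last key=%lld\n", (long long)uid, (long long)key);
}

int main(void) {
  /* one table, uid = 1, last key = 2, encoded big-endian */
  const unsigned char trailer[] = {0,0,0,1, 0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,2};
  decodeProgressTrailer(trailer, printProgress);
  return 0;
}
```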
@@ -772,6 +772,9 @@ void taos_free_result(TAOS_RES *res) { assert(pRes->numOfRows == 0 || (pCmd->command > TSDB_SQL_LOCAL)); tscFreeSqlObj(pSql); tscTrace("%p Async sql result is freed by app", pSql); + } else if (keepCmd) { + tscFreeSqlResult(pSql); + tscTrace("%p sql result is freed while sql command is kept", pSql); } else { tscFreeSqlObjPartial(pSql); tscTrace("%p sql result is freed", pSql); @@ -779,6 +782,10 @@ void taos_free_result(TAOS_RES *res) { } } +void taos_free_result(TAOS_RES *res) { + taos_free_result_imp(res, 0); +} + int taos_errno(TAOS *taos) { STscObj *pObj = (STscObj *)taos; int code; @@ -861,61 +868,63 @@ void taos_stop_query(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int len = 0; for (int i = 0; i < num_fields; ++i) { + if (i > 0) { + str[len++] = ' '; + } + if (row[i] == NULL) { - len += sprintf(str + len, "%s ", TSDB_DATA_NULL_STR); + len += sprintf(str + len, "%s", TSDB_DATA_NULL_STR); continue; } switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: - len += sprintf(str + len, "%d ", *((char *)row[i])); + len += sprintf(str + len, "%d", *((char *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - len += sprintf(str + len, "%d ", *((short *)row[i])); + len += sprintf(str + len, "%d", *((short *)row[i])); break; case TSDB_DATA_TYPE_INT: - len += sprintf(str + len, "%d ", *((int *)row[i])); + len += sprintf(str + len, "%d", *((int *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: { float fv = 0; fv = GET_FLOAT_VAL(row[i]); - len += sprintf(str + len, "%f ", fv); + len += sprintf(str + len, "%f", fv); } break; case TSDB_DATA_TYPE_DOUBLE:{ double dv = 0; dv = GET_DOUBLE_VAL(row[i]); - len += sprintf(str + len, "%lf ", dv); + len += sprintf(str + len, "%lf", dv); } - break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: { - /* limit the max length of string to no greater than the maximum length, - * in case of not null-terminated string */ - size_t xlen = strlen(row[i]); - size_t trueLen = MIN(xlen, fields[i].bytes); - - memcpy(str + len, (char *)row[i], trueLen); - - str[len + trueLen] = ' '; - len += (trueLen + 1); - } break; + size_t xlen = 0; + for (xlen = 0; xlen <= fields[i].bytes; xlen++) { + char c = ((char*)row[i])[xlen]; + if (c == 0) break; + str[len++] = c; + } + str[len] = 0; + } + break; case TSDB_DATA_TYPE_TIMESTAMP: - len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i])); + len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_BOOL: - len += sprintf(str + len, "%d ", *((int8_t *)row[i])); + len += sprintf(str + len, "%d", *((int8_t *)row[i])); default: break; } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index cd2736e910..870f640e17 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -268,11 +268,11 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } @@ -302,24 +302,24 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, 
SSqlObj *pSql) { tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } } else { pStream->stime += pStream->slidingTime; if ((pStream->stime - pStream->interval) >= pStream->etime) { - tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", pStream->pSql, pStream, + tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream, pStream->stime, pStream->etime); // TODO : How to terminate stream here - taos_close_stream(pStream); if (pStream->callback) { // Callback function from upper level pStream->callback(pStream->param); } + taos_close_stream(pStream); return; } @@ -353,7 +353,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime; if (pCmd->nAggTimeInterval < minIntervalTime) { - tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream, + tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream, pCmd->nAggTimeInterval, minIntervalTime); pCmd->nAggTimeInterval = minIntervalTime; } diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index f2e9395c68..8a2c8ae44e 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -22,125 +22,407 @@ #include "tsclient.h" #include "tsocket.h" #include "ttime.h" +#include "ttimer.h" #include "tutil.h" +#include "tscUtil.h" +#include "tcache.h" -typedef struct { - void * signature; - char name[TSDB_METER_ID_LEN]; - int mseconds; - TSKEY lastKey; - uint64_t stime; - TAOS_FIELD fields[TSDB_MAX_COLUMNS]; - int numOfFields; - TAOS * taos; - TAOS_RES * result; +typedef struct SSubscriptionProgress { + int64_t uid; + TSKEY key; +} SSubscriptionProgress; + +typedef struct SSub { + void * signature; + char topic[32]; + int64_t lastSyncTime; + int64_t lastConsumeTime; + TAOS * taos; + void * pTimer; + SSqlObj * pSql; + int interval; + TAOS_SUBSCRIBE_CALLBACK fp; + void * param; + int numOfMeters; + SSubscriptionProgress * progress; } SSub; -TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *name, int64_t time, int mseconds) { - SSub *pSub; - pSub = (SSub *)malloc(sizeof(SSub)); - if (pSub == NULL) return NULL; - memset(pSub, 0, sizeof(SSub)); +static int tscCompareSubscriptionProgress(const void* a, const void* b) { + const SSubscriptionProgress* x = (const SSubscriptionProgress*)a; + const SSubscriptionProgress* y = (const SSubscriptionProgress*)b; + if (x->uid > y->uid) return 1; + if (x->uid < y->uid) return -1; + return 0; +} - pSub->signature = pSub; - strcpy(pSub->name, name); - pSub->mseconds = mseconds; - pSub->lastKey = time; - if (pSub->lastKey == 0) { - pSub->lastKey = taosGetTimestampMs(); +TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid) { + if (sub == NULL) + return 0; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else + return p->key; } - taos_init(); - pSub->taos = 
taos_connect(host, user, pass, NULL, 0); - if (pSub->taos == NULL) { - tfree(pSub); - } else { - char qstr[256] = {0}; - sprintf(qstr, "use %s", db); - int res = taos_query(pSub->taos, qstr); - if (res != 0) { - tscError("failed to open DB:%s", db); - taos_close(pSub->taos); - tfree(pSub); - } else { - snprintf(qstr, tListLen(qstr), "select * from %s where _c0 > now+1000d", pSub->name); - if (taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - taos_close(pSub->taos); - tfree(pSub); - return NULL; - } - pSub->result = taos_use_result(pSub->taos); - pSub->numOfFields = taos_num_fields(pSub->result); - memcpy(pSub->fields, taos_fetch_fields(pSub->result), sizeof(TAOS_FIELD) * pSub->numOfFields); + return 0; +} + +void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) { + if( sub == NULL) + return; + + SSub* pSub = (SSub*)sub; + for (int s = 0, e = pSub->numOfMeters; s < e;) { + int m = (s + e) / 2; + SSubscriptionProgress* p = pSub->progress + m; + if (p->uid > uid) + e = m; + else if (p->uid < uid) + s = m + 1; + else { + if (ts >= p->key) p->key = ts; + break; } } +} + + +static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) { + SSub* pSub = calloc(1, sizeof(SSub)); + if (pSub == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate memory for subscription"); + return NULL; + } + + SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); + if (pSql == NULL) { + globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; + tscError("failed to allocate SSqlObj for subscription"); + goto failed; + } + + pSql->signature = pSql; + pSql->pTscObj = pObj; + + char* sqlstr = (char*)malloc(strlen(sql) + 1); + if (sqlstr == NULL) { + tscError("failed to allocate sql string for subscription"); + goto failed; + } + strcpy(sqlstr, sql); + strtolower(sqlstr, sqlstr); + pSql->sqlstr = sqlstr; + + tsem_init(&pSql->rspSem, 0, 0); + tsem_init(&pSql->emptyRspSem, 0, 1); + + SSqlRes *pRes = &pSql->res; + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + + pSql->pSubscription = pSub; + pSub->pSql = pSql; + pSub->signature = pSub; + strncpy(pSub->topic, topic, sizeof(pSub->topic)); + pSub->topic[sizeof(pSub->topic) - 1] = 0; + return pSub; + +failed: + if (sqlstr != NULL) { + free(sqlstr); + } + if (pSql != NULL) { + free(pSql); + } + free(pSub); + return NULL; +} + + +static void tscProcessSubscriptionTimer(void *handle, void *tmrId) { + SSub *pSub = (SSub *)handle; + if (pSub == NULL || pSub->pTimer != tmrId) return; + + TAOS_RES* res = taos_consume(pSub); + if (res != NULL) { + pSub->fp(pSub, res, pSub->param, 0); + } + + taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer); +} + + +int tscUpdateSubscription(STscObj* pObj, SSub* pSub) { + int code = (uint8_t)tsParseSql(pSub->pSql, pObj->acctId, pObj->db, false); + if (code != TSDB_CODE_SUCCESS) { + tscError("failed to parse sql statement: %s", pSub->topic); + return 0; + } + + SSqlCmd* pCmd = &pSub->pSql->cmd; + if (pCmd->command != TSDB_SQL_SELECT) { + tscError("only 'select' statement is allowed in subscription: %s", pSub->topic); + return 0; + } + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + int numOfMeters = 0; + if (!UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + numOfMeters += pVnodeSidList->numOfSids; + } + } + + 
SSubscriptionProgress* progress = (SSubscriptionProgress*)calloc(numOfMeters, sizeof(SSubscriptionProgress)); + if (progress == NULL) { + tscError("failed to allocate memory for progress: %s", pSub->topic); + return 0; + } + + if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { + numOfMeters = 1; + int64_t uid = pMeterMetaInfo->pMeterMeta->uid; + progress[0].uid = uid; + progress[0].key = tscGetSubscriptionProgress(pSub, uid); + } else { + SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta; + numOfMeters = 0; + for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) { + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i); + for (int32_t j = 0; j < pVnodeSidList->numOfSids; j++) { + SMeterSidExtInfo *pMeterInfo = tscGetMeterSidInfo(pVnodeSidList, j); + int64_t uid = pMeterInfo->uid; + progress[numOfMeters].uid = uid; + progress[numOfMeters++].key = tscGetSubscriptionProgress(pSub, uid); + } + } + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); + } + + free(pSub->progress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + + pSub->lastSyncTime = taosGetTimestampMs(); + + return 1; +} + + +static int tscLoadSubscriptionProgress(SSub* pSub) { + char buf[TSDB_MAX_SQL_LEN]; + sprintf(buf, "%s/subscribe/%s", dataDir, pSub->topic); + + FILE* fp = fopen(buf, "r"); + if (fp == NULL) { + tscTrace("subscription progress file does not exist: %s", pSub->topic); + return 1; + } + + if (fgets(buf, sizeof(buf), fp) == NULL) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } + + for (int i = 0; i < sizeof(buf); i++) { + if (buf[i] == 0) + break; + if (buf[i] == '\r' || buf[i] == '\n') { + buf[i] = 0; + break; + } + } + if (strcmp(buf, pSub->pSql->sqlstr) != 0) { + tscTrace("subscription sql statement mismatch: %s", pSub->topic); + fclose(fp); + return 0; + } + + if (fgets(buf, sizeof(buf), fp) == NULL || atoi(buf) < 0) { + tscTrace("invalid subscription progress file: %s", pSub->topic); + fclose(fp); + return 0; + } + + int numOfMeters = atoi(buf); + SSubscriptionProgress* progress = calloc(numOfMeters, sizeof(SSubscriptionProgress)); + for (int i = 0; i < numOfMeters; i++) { + if (fgets(buf, sizeof(buf), fp) == NULL) { + fclose(fp); + free(progress); + return 0; + } + int64_t uid, key; + sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key); + progress[i].uid = uid; + progress[i].key = key; + } + + fclose(fp); + + qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress); + pSub->numOfMeters = numOfMeters; + pSub->progress = progress; + tscTrace("subscription progress loaded, %d tables: %s", numOfMeters, pSub->topic); + return 1; +} + +void tscSaveSubscriptionProgress(void* sub) { + SSub* pSub = (SSub*)sub; + + char path[256]; + sprintf(path, "%s/subscribe", dataDir); + if (access(path, 0) != 0) { + mkdir(path, 0777); + } + + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + FILE* fp = fopen(path, "w+"); + if (fp == NULL) { + tscError("failed to create progress file for subscription: %s", pSub->topic); + return; + } + + fputs(pSub->pSql->sqlstr, fp); + fprintf(fp, "\n%d\n", pSub->numOfMeters); + for (int i = 0; i < pSub->numOfMeters; i++) { + int64_t uid = pSub->progress[i].uid; + TSKEY key = pSub->progress[i].key; + fprintf(fp, "%" PRId64 ":%" PRId64 "\n", uid, key); + } + + fclose(fp); +} + +TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) { + STscObj* pObj 
= (STscObj*)taos; + if (pObj == NULL || pObj->signature != pObj) { + globalCode = TSDB_CODE_DISCONNECTED; + tscError("connection disconnected"); + return NULL; + } + + SSub* pSub = tscCreateSubscription(pObj, topic, sql); + if (pSub == NULL) { + return NULL; + } + pSub->taos = taos; + + if (restart) { + tscTrace("restart subscription: %s", topic); + } else { + tscLoadSubscriptionProgress(pSub); + } + + if (!tscUpdateSubscription(pObj, pSub)) { + taos_unsubscribe(pSub, 1); + return NULL; + } + + pSub->interval = interval; + if (fp != NULL) { + tscTrace("asynchronize subscription, create new timer", topic); + pSub->fp = fp; + pSub->param = param; + taosTmrReset(tscProcessSubscriptionTimer, interval, pSub, tscTmr, &pSub->pTimer); + } return pSub; } -TAOS_ROW taos_consume(TAOS_SUB *tsub) { - SSub * pSub = (SSub *)tsub; - TAOS_ROW row; - char qstr[256]; +void taos_free_result_imp(SSqlObj* pSql, int keepCmd); +TAOS_RES *taos_consume(TAOS_SUB *tsub) { + SSub *pSub = (SSub *)tsub; if (pSub == NULL) return NULL; - if (pSub->signature != pSub) return NULL; - while (1) { - if (pSub->result != NULL) { - row = taos_fetch_row(pSub->result); - if (row != NULL) { - pSub->lastKey = *((uint64_t *)row[0]); - return row; - } + tscSaveSubscriptionProgress(pSub); - taos_free_result(pSub->result); - pSub->result = NULL; - uint64_t etime = taosGetTimestampMs(); - int64_t mseconds = pSub->mseconds - etime + pSub->stime; - if (mseconds < 0) mseconds = 0; - taosMsleep((int)mseconds); - } + SSqlObj* pSql = pSub->pSql; + SSqlRes *pRes = &pSql->res; - pSub->stime = taosGetTimestampMs(); - - sprintf(qstr, "select * from %s where _c0 > %" PRId64 " order by _c0 asc", pSub->name, pSub->lastKey); - if (taos_query(pSub->taos, qstr)) { - tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos)); - return NULL; - } - - pSub->result = taos_use_result(pSub->taos); - - if (pSub->result == NULL) { - tscTrace("failed to get result, reason:%s", taos_errstr(pSub->taos)); - return NULL; + if (pSub->pTimer == NULL) { + int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime; + if (duration < (int64_t)(pSub->interval)) { + tscTrace("subscription consume too frequently, blocking..."); + taosMsleep(pSub->interval - (int32_t)duration); } } - return NULL; + for (int retry = 0; retry < 3; retry++) { + tscRemoveFromSqlList(pSql); + + if (taosGetTimestampMs() - pSub->lastSyncTime > 10 * 60 * 1000) { + tscTrace("begin meter synchronization"); + char* sqlstr = pSql->sqlstr; + pSql->sqlstr = NULL; + taos_free_result_imp(pSql, 0); + pSql->sqlstr = sqlstr; + taosClearDataCache(tscCacheHandle); + if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL; + tscTrace("meter synchronization completed"); + } else { + uint16_t type = pSql->cmd.type; + taos_free_result_imp(pSql, 1); + pRes->numOfRows = 1; + pRes->numOfTotal = 0; + pRes->qhandle = 0; + pSql->thandle = NULL; + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->cmd.type = type; + + tscGetMeterMetaInfo(&pSql->cmd, 0)->vnodeIndex = 0; + } + + tscDoQuery(pSql); + if (pRes->code != TSDB_CODE_NOT_ACTIVE_TABLE) { + break; + } + // meter was removed, make sync time zero, so that next retry will + // do synchronization first + pSub->lastSyncTime = 0; + } + + if (pRes->code != TSDB_CODE_SUCCESS) { + tscError("failed to query data, error code=%d", pRes->code); + tscRemoveFromSqlList(pSql); + return NULL; + } + + pSub->lastConsumeTime = taosGetTimestampMs(); + return pSql; } -void taos_unsubscribe(TAOS_SUB *tsub) { +void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) { SSub *pSub = (SSub 
*)tsub; + if (pSub == NULL || pSub->signature != pSub) return; - if (pSub == NULL) return; - if (pSub->signature != pSub) return; + if (pSub->pTimer != NULL) { + taosTmrStop(pSub->pTimer); + } - taos_close(pSub->taos); + if (keepProgress) { + tscSaveSubscriptionProgress(pSub); + } else { + char path[256]; + sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic); + remove(path); + } + + tscFreeSqlObj(pSub->pSql); + free(pSub->progress); + memset(pSub, 0, sizeof(*pSub)); free(pSub); } - -int taos_subfields_count(TAOS_SUB *tsub) { - SSub *pSub = (SSub *)tsub; - - return pSub->numOfFields; -} - -TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub) { - SSub *pSub = (SSub *)tsub; - - return pSub->fields; -} diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 6efe344719..6c685b06b4 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -48,6 +48,7 @@ static pthread_once_t tscinit = PTHREAD_ONCE_INIT; extern int tsTscEnableRecordSql; extern int tsNumOfLogLines; void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); +void deltaToUtcInitOnce(); void tscCheckDiskUsage(void *para, void *unused) { taosGetDisk(); @@ -60,6 +61,7 @@ void taos_init_imp() { SRpcInit rpcInit; srand(taosGetTimestampSec()); + deltaToUtcInitOnce(); if (tscEmbedded == 0) { /* @@ -93,7 +95,6 @@ void taos_init_imp() { taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); } -#ifdef CLUSTER tscMgmtIpList.numOfIps = 2; strcpy(tscMgmtIpList.ipstr[0], tsMasterIp); tscMgmtIpList.ip[0] = inet_addr(tsMasterIp); @@ -106,7 +107,6 @@ void taos_init_imp() { strcpy(tscMgmtIpList.ipstr[2], tsSecondIp); tscMgmtIpList.ip[2] = inet_addr(tsSecondIp); } -#endif tscInitMsgs(); slaveIndex = rand(); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 02d0d9c6b0..f22e322bdf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -376,6 +376,21 @@ void tscFreeSqlCmdData(SSqlCmd* pCmd) { } } +void tscFreeSqlResult(SSqlObj* pSql) { + tfree(pSql->res.pRsp); + pSql->res.row = 0; + pSql->res.numOfRows = 0; + pSql->res.numOfTotal = 0; + + pSql->res.numOfGroups = 0; + tfree(pSql->res.pGroupRec); + + tscDestroyLocalReducer(pSql); + + tscDestroyResPointerInfo(&pSql->res); + tfree(pSql->res.pColumnIndex); +} + void tscFreeSqlObjPartial(SSqlObj* pSql) { if (pSql == NULL || pSql->signature != pSql) { return; @@ -399,20 +414,9 @@ void tscFreeSqlObjPartial(SSqlObj* pSql) { tfree(pSql->sqlstr); pthread_mutex_unlock(&pObj->mutex); - tfree(pSql->res.pRsp); - pSql->res.row = 0; - pSql->res.numOfRows = 0; - pSql->res.numOfTotal = 0; - - pSql->res.numOfGroups = 0; - tfree(pSql->res.pGroupRec); - - tscDestroyLocalReducer(pSql); - + tscFreeSqlResult(pSql); tfree(pSql->pSubs); pSql->numOfSubs = 0; - tscDestroyResPointerInfo(pRes); - tfree(pSql->res.pColumnIndex); tscFreeSqlCmdData(pCmd); tscRemoveAllMeterMetaInfo(pCmd, false); @@ -822,7 +826,7 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE } void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible) { - if (index < 0 || index > pFieldInfo->numOfOutputCols) { + if (index < 0 || index >= pFieldInfo->numOfOutputCols) { return; } diff --git a/src/connector/grafana/tdengine/dist/plugin.json b/src/connector/grafana/tdengine/dist/plugin.json index 3734fbfc2d..e9954ce6ce 100644 --- a/src/connector/grafana/tdengine/dist/plugin.json +++ b/src/connector/grafana/tdengine/dist/plugin.json @@ -1,6 +1,6 @@ { "name": "TDengine", - "id": "tdengine", + "id": "taosdata-tdengine-datasource", 
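Before the remaining Grafana plugin metadata, it is worth seeing the reworked C subscription API from the preceding client changes end to end. The sketch below is a minimal synchronous consumer based only on the signatures introduced in this patch; the connection parameters, the `test` database and the `select * from meters` query are assumptions for illustration, and error handling is reduced to early returns.

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  taos_init();
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  /* fp == NULL selects synchronous mode; restart == 0 resumes a previous
     subscription with the same topic if a progress file exists. */
  TAOS_SUB *tsub = taos_subscribe(taos, 0, "demo_topic",
                                  "select * from meters;", NULL, NULL, 1000);
  if (tsub == NULL) {
    taos_close(taos);
    return 1;
  }

  for (int i = 0; i < 10; i++) {
    /* Blocks if called again within the 1000 ms pulling interval. */
    TAOS_RES *res = taos_consume(tsub);
    if (res == NULL) break;  /* query failed */

    int rows = 0;
    while (taos_fetch_row(res) != NULL) rows++;
    printf("poll %d: %d new row(s)\n", i, rows);
    /* In this implementation the result is the subscription's own SSqlObj,
       so it is not passed to taos_free_result(); taos_unsubscribe() releases it. */
  }

  taos_unsubscribe(tsub, 1);  /* non-zero keepProgress: the progress file is kept */
  taos_close(taos);
  return 0;
}
```

Because `fp` is `NULL`, consumption is synchronous: when two `taos_consume` calls arrive closer together than the interval, the second one sleeps for the remainder, matching the blocking logic added to `taos_consume` above. Passing a non-zero `keepProgress` to `taos_unsubscribe` keeps the file under `dataDir/subscribe/<topic>`, so a later `taos_subscribe` with `restart == 0` can pick up where this run stopped.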
"type": "datasource", "partials": { @@ -24,8 +24,8 @@ {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"}, {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"} ], - "version": "1.6.0", - "updated": "2019-11-12" + "version": "1.0.0", + "updated": "2020-01-13" }, "dependencies": { diff --git a/src/connector/grafana/tdengine/package.json b/src/connector/grafana/tdengine/package.json index 83d29b78ce..8e542bef26 100644 --- a/src/connector/grafana/tdengine/package.json +++ b/src/connector/grafana/tdengine/package.json @@ -1,9 +1,8 @@ { "name": "TDengine", - "private": true, + "private": false, "version": "1.0.0", "description": "grafana datasource plugin for tdengine", - "main": "index.js", "scripts": { "build": "./node_modules/grunt-cli/bin/grunt", "test": "./node_modules/grunt-cli/bin/grunt mochaTest" @@ -12,7 +11,7 @@ "type": "git", "url": "git+https://github.com/taosdata/TDengine.git" }, - "author": "", + "author": "https://www.taosdata.com", "license": "AGPL 3.0", "bugs": { "url": "https://github.com/taosdata/TDengine/issues" diff --git a/src/connector/grafana/tdengine/src/plugin.json b/src/connector/grafana/tdengine/src/plugin.json index 3734fbfc2d..e9954ce6ce 100644 --- a/src/connector/grafana/tdengine/src/plugin.json +++ b/src/connector/grafana/tdengine/src/plugin.json @@ -1,6 +1,6 @@ { "name": "TDengine", - "id": "tdengine", + "id": "taosdata-tdengine-datasource", "type": "datasource", "partials": { @@ -24,8 +24,8 @@ {"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"}, {"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"} ], - "version": "1.6.0", - "updated": "2019-11-12" + "version": "1.0.0", + "updated": "2020-01-13" }, "dependencies": { diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py index 86a3489d07..505619436c 100644 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ b/src/connector/python/linux/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def 
__init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. + @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/linux/python2/taos/connection.py index ba24209552..04fbbdec04 100644 --- a/src/connector/python/linux/python2/taos/connection.py +++ b/src/connector/python/linux/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/linux/python2/taos/subscription.py new file mode 100644 index 0000000000..2d01395532 --- /dev/null +++ b/src/connector/python/linux/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py index 259c8bbd06..7fcedc9fe9 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/linux/python3/taos/cinterface.py @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py index ba24209552..04fbbdec04 100644 --- a/src/connector/python/linux/python3/taos/connection.py +++ b/src/connector/python/linux/python3/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -50,6 +50,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. """ diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py new file mode 100644 index 0000000000..d3cf10d5ad --- /dev/null +++ b/src/connector/python/linux/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py index 8e3b701929..f8cdfcc51e 100644 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ b/src/connector/python/windows/python2/taos/cinterface.py @@ -13,14 +13,14 @@ def _convert_microsecond_to_datetime(micro): def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - _timstamp_converter = _convert_millisecond_to_datetime + _timestamp_converter = _convert_millisecond_to_datetime if micro: - _timstamp_converter = _convert_microsecond_to_datetime + _timestamp_converter = _convert_microsecond_to_datetime if num_of_rows > 0: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) else: - return list(map(_timstamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row @@ -144,6 +144,8 @@ class CTaosInterface(object): libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p def __init__(self, config=None): ''' @@ -252,6 +254,41 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(connection) + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + @staticmethod def useResult(connection): '''Use result after calling self.query @@ -275,8 +312,8 @@ class CTaosInterface(object): if num_of_rows == 0: return None, 0 - blocks = [None] * len(fields) isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) for i in range(len(fields)): data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] @@ -351,4 +388,20 @@ class CTaosInterface(object): def errStr(connection): """Return the error styring """ - return CTaosInterface.libtaos.taos_errstr(connection) \ No newline at end of file + return CTaosInterface.libtaos.taos_errstr(connection) + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py index ba24209552..e2783975d9 100644 --- a/src/connector/python/windows/python2/taos/connection.py +++ b/src/connector/python/windows/python2/taos/connection.py @@ -1,5 +1,5 @@ -# from .cursor import TDengineCursor from .cursor import TDengineCursor +from .subscription import TDengineSubscription from .cinterface import CTaosInterface class TDengineConnection(object): @@ -15,7 +15,8 @@ class TDengineConnection(object): self._config = None self._chandle = None - self.config(**kwargs) + if len(kwargs) > 0: + self.config(**kwargs) def config(self, **kwargs): # host @@ -50,6 +51,14 @@ class TDengineConnection(object): """ return CTaosInterface.close(self._conn) + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py new file mode 100644 index 0000000000..d3cf10d5ad --- /dev/null +++ b/src/connector/python/windows/python2/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. + """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py index 2cddf5fccf..b4b44e199c 100644 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ b/src/connector/python/windows/python3/taos/cinterface.py @@ -1,370 +1,407 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli/1000.0) - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro/1000000.0) - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) - else: - return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in 
ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] - else: - return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] - else: - return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - if num_of_rows > 0: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] - else: - return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - - res = [] - - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - else: - res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - except ValueError: - res.append(None) - - return res - # if num_of_rows > 0: - # for i in range(abs(num_of_rows)): - # try: - # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) - # except ValueError: - # res.append(None) - # 
return res - # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] - # else: - # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT : _crow_tinyint_to_python, - FieldType.C_SMALLINT : _crow_smallint_to_python, - FieldType.C_INT : _crow_int_to_python, - FieldType.C_BIGINT : _crow_bigint_to_python, - FieldType.C_FLOAT : _crow_float_to_python, - FieldType.C_DOUBLE : _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP : _crow_timestamp_to_python, - FieldType.C_NCHAR : _crow_nchar_to_python -} - -# Corresponding TAOS_FIELD structure in C -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 64), - ('bytes', ctypes.c_short), - ('type', ctypes.c_char)] - -# C interface class -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config != None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect(self, host=None, user="root", password="taosdata", db=None, port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db != None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value == None: - print('connect to TDengine failed') - # sys.exit(1) - else: - print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - 
@rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(connection): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(connection) - - @staticmethod - def useResult(connection): - '''Use result after calling self.query - ''' - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(connection)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return result, fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - - if num_of_rows == 0: - return None, 0 - - isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError("Invalid data type returned from database") - - blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(connection): - return CTaosInterface.libtaos.taos_field_count(connection) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # @staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - 
@staticmethod - def errno(connection): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(connection) - - @staticmethod - def errStr(connection): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - - print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) - print('Affected rows: {}'.format(cinter.affectedRows(conn))) - - result, des = CTaosInterface.useResult(conn) - - data, num_of_rows = CTaosInterface.fetchBlock(result, des) - - print(data) - +import ctypes +from .constants import FieldType +from .error import * +import math +import datetime + +def _convert_millisecond_to_datetime(milli): + return datetime.datetime.fromtimestamp(milli/1000.0) + +def _convert_microsecond_to_datetime(micro): + return datetime.datetime.fromtimestamp(micro/1000000.0) + +def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + _timestamp_converter = _convert_millisecond_to_datetime + if micro: + _timestamp_converter = _convert_microsecond_to_datetime + + if num_of_rows > 0: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1])) + else: + return list(map(_timestamp_converter, ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)])) + +def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bool row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[:abs(num_of_rows)] ] + +def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C tinyint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)] ] + +def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C smallint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[:abs(num_of_rows)] ] + +def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C int row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)] ] + +def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C bigint row to python row + """ + if num_of_rows > 0: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, 
ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)][::-1] ] + else: + return [ None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_longlong))[:abs(num_of_rows)] ] + +def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C float row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)] ] + +def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C double row to python row + """ + if num_of_rows > 0: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)][::-1] ] + else: + return [ None if math.isnan(ele) else ele for ele in ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)] ] + +def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C binary row to python row + """ + if num_of_rows > 0: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)][::-1]] + else: + return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + +def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): + """Function to convert C nchar row to python row + """ + assert(nbytes is not None) + + res = [] + + for i in range(abs(num_of_rows)): + try: + if num_of_rows >= 0: + res.append( (ctypes.cast(data+nbytes*(abs(num_of_rows - i -1)), ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + else: + res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + except ValueError: + res.append(None) + + return res + # if num_of_rows > 0: + # for i in range(abs(num_of_rows)): + # try: + # res.append( (ctypes.cast(data+nbytes*i, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[0].value ) + # except ValueError: + # res.append(None) + # return res + # # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)][::-1]] + # else: + # return [ele.value for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_wchar * (nbytes//4))))[:abs(num_of_rows)]] + +_CONVERT_FUNC = { + FieldType.C_BOOL: _crow_bool_to_python, + FieldType.C_TINYINT : _crow_tinyint_to_python, + FieldType.C_SMALLINT : _crow_smallint_to_python, + FieldType.C_INT : _crow_int_to_python, + FieldType.C_BIGINT : _crow_bigint_to_python, + FieldType.C_FLOAT : _crow_float_to_python, + FieldType.C_DOUBLE : _crow_double_to_python, + FieldType.C_BINARY: _crow_binary_to_python, + FieldType.C_TIMESTAMP : _crow_timestamp_to_python, + FieldType.C_NCHAR : _crow_nchar_to_python +} + +# Corresponding TAOS_FIELD structure in C +class TaosField(ctypes.Structure): + _fields_ = [('name', ctypes.c_char * 64), + ('bytes', ctypes.c_short), + ('type', ctypes.c_char)] + +# C interface class +class CTaosInterface(object): + + libtaos = ctypes.windll.LoadLibrary('taos') + + libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) + libtaos.taos_init.restype = None + libtaos.taos_connect.restype = ctypes.c_void_p + 
libtaos.taos_use_result.restype = ctypes.c_void_p + libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) + libtaos.taos_errstr.restype = ctypes.c_char_p + libtaos.taos_subscribe.restype = ctypes.c_void_p + libtaos.taos_consume.restype = ctypes.c_void_p + + def __init__(self, config=None): + ''' + Function to initialize the class + @host : str, hostname to connect + @user : str, username to connect to server + @password : str, password to connect to server + @db : str, default db to use when log in + @config : str, config directory + + @rtype : None + ''' + if config is None: + self._config = ctypes.c_char_p(None) + else: + try: + self._config = ctypes.c_char_p(config.encode('utf-8')) + except AttributeError: + raise AttributeError("config is expected as a str") + + if config != None: + CTaosInterface.libtaos.taos_options(3, self._config) + + CTaosInterface.libtaos.taos_init() + + @property + def config(self): + """ Get current config + """ + return self._config + + def connect(self, host=None, user="root", password="taosdata", db=None, port=0): + ''' + Function to connect to server + + @rtype: c_void_p, TDengine handle + ''' + # host + try: + _host = ctypes.c_char_p(host.encode( + "utf-8")) if host != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("host is expected as a str") + + # user + try: + _user = ctypes.c_char_p(user.encode("utf-8")) + except AttributeError: + raise AttributeError("user is expected as a str") + + # password + try: + _password = ctypes.c_char_p(password.encode("utf-8")) + except AttributeError: + raise AttributeError("password is expected as a str") + + # db + try: + _db = ctypes.c_char_p( + db.encode("utf-8")) if db != None else ctypes.c_char_p(None) + except AttributeError: + raise AttributeError("db is expected as a str") + + # port + try: + _port = ctypes.c_int(port) + except TypeError: + raise TypeError("port is expected as an int") + + connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( + _host, _user, _password, _db, _port)) + + if connection.value == None: + print('connect to TDengine failed') + # sys.exit(1) + else: + print('connect to TDengine success') + + return connection + + @staticmethod + def close(connection): + '''Close the TDengine handle + ''' + CTaosInterface.libtaos.taos_close(connection) + print('connection is closed') + + @staticmethod + def query(connection, sql): + '''Run SQL + + @sql: str, sql string to run + + @rtype: 0 on success and -1 on failure + ''' + try: + return CTaosInterface.libtaos.taos_query(connection, ctypes.c_char_p(sql.encode('utf-8'))) + except AttributeError: + raise AttributeError("sql is expected as a string") + # finally: + # CTaosInterface.libtaos.close(connection) + + @staticmethod + def affectedRows(connection): + """The affected rows after runing query + """ + return CTaosInterface.libtaos.taos_affected_rows(connection) + + @staticmethod + def subscribe(connection, restart, topic, sql, interval): + """Create a subscription + @restart boolean, + @sql string, sql statement for data query, must be a 'select' statement. 
+ @topic string, name of this subscription + """ + return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( + connection, + 1 if restart else 0, + ctypes.c_char_p(topic.encode('utf-8')), + ctypes.c_char_p(sql.encode('utf-8')), + None, + None, + interval)) + + @staticmethod + def consume(sub): + """Consume data of a subscription + """ + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.libtaos.taos_num_fields(result)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + return result, fields + + @staticmethod + def unsubscribe(sub, keepProgress): + """Cancel a subscription + """ + CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) + + @staticmethod + def useResult(connection): + '''Use result after calling self.query + ''' + result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection)) + fields = [] + pfields = CTaosInterface.fetchFields(result) + for i in range(CTaosInterface.fieldsCount(connection)): + fields.append({'name': pfields[i].name.decode('utf-8'), + 'bytes': pfields[i].bytes, + 'type': ord(pfields[i].type)}) + + return result, fields + + @staticmethod + def fetchBlock(result, fields): + pblock = ctypes.c_void_p(0) + num_of_rows = CTaosInterface.libtaos.taos_fetch_block( + result, ctypes.byref(pblock)) + + if num_of_rows == 0: + return None, 0 + + isMicro = (CTaosInterface.libtaos.taos_result_precision(result) == FieldType.C_TIMESTAMP_MICRO) + blocks = [None] * len(fields) + for i in range(len(fields)): + data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] + + if fields[i]['type'] not in _CONVERT_FUNC: + raise DatabaseError("Invalid data type returned from database") + + blocks[i] = _CONVERT_FUNC[fields[i]['type']](data, num_of_rows, fields[i]['bytes'], isMicro) + + return blocks, abs(num_of_rows) + + @staticmethod + def freeResult(result): + CTaosInterface.libtaos.taos_free_result(result) + result.value = None + + @staticmethod + def fieldsCount(connection): + return CTaosInterface.libtaos.taos_field_count(connection) + + @staticmethod + def fetchFields(result): + return CTaosInterface.libtaos.taos_fetch_fields(result) + + # @staticmethod + # def fetchRow(result, fields): + # l = [] + # row = CTaosInterface.libtaos.taos_fetch_row(result) + # if not row: + # return None + + # for i in range(len(fields)): + # l.append(CTaosInterface.getDataValue( + # row[i], fields[i]['type'], fields[i]['bytes'])) + + # return tuple(l) + + # @staticmethod + # def getDataValue(data, dtype, byte): + # ''' + # ''' + # if not data: + # return None + + # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): + # return ctypes.cast(data, 
ctypes.POINTER(ctypes.c_double))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): + # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): + # return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0] + # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): + # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') + + @staticmethod + def errno(connection): + """Return the error number. + """ + return CTaosInterface.libtaos.taos_errno(connection) + + @staticmethod + def errStr(connection): + """Return the error styring + """ + return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8') + + +if __name__ == '__main__': + cinter = CTaosInterface() + conn = cinter.connect() + + print('Query return value: {}'.format(cinter.query(conn, 'show databases'))) + print('Affected rows: {}'.format(cinter.affectedRows(conn))) + + result, des = CTaosInterface.useResult(conn) + + data, num_of_rows = CTaosInterface.fetchBlock(result, des) + + print(data) + cinter.close(conn) \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py index a88e25a6db..e2783975d9 100644 --- a/src/connector/python/windows/python3/taos/connection.py +++ b/src/connector/python/windows/python3/taos/connection.py @@ -1,81 +1,89 @@ -# from .cursor import TDengineCursor -from .cursor import TDengineCursor -from .cinterface import CTaosInterface - -class TDengineConnection(object): - """ TDengine connection object - """ - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. 
- """ - result = self._chandle.useResult(self._conn)[0] - if result: - self._chandle.freeResult(result) - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() +from .cursor import TDengineCursor +from .subscription import TDengineSubscription +from .cinterface import CTaosInterface + +class TDengineConnection(object): + """ TDengine connection object + """ + def __init__(self, *args, **kwargs): + self._conn = None + self._host = None + self._user = "root" + self._password = "taosdata" + self._database = None + self._port = 0 + self._config = None + self._chandle = None + + if len(kwargs) > 0: + self.config(**kwargs) + + def config(self, **kwargs): + # host + if 'host' in kwargs: + self._host = kwargs['host'] + + # user + if 'user' in kwargs: + self._user = kwargs['user'] + + # password + if 'password' in kwargs: + self._password = kwargs['password'] + + # database + if 'database' in kwargs: + self._database = kwargs['database'] + + # port + if 'port' in kwargs: + self._port = kwargs['port'] + + # config + if 'config' in kwargs: + self._config = kwargs['config'] + + self._chandle = CTaosInterface(self._config) + self._conn = self._chandle.connect(self._host, self._user, self._password, self._database, self._port) + + def close(self): + """Close current connection. + """ + return CTaosInterface.close(self._conn) + + def subscribe(self, restart, topic, sql, interval): + """Create a subscription. + """ + if self._conn is None: + return None + sub = CTaosInterface.subscribe(self._conn, restart, topic, sql, interval) + return TDengineSubscription(sub) + + def cursor(self): + """Return a new Cursor object using the connection. + """ + return TDengineCursor(self) + + def commit(self): + """Commit any pending transaction to the database. + + Since TDengine do not support transactions, the implement is void functionality. + """ + pass + + def rollback(self): + """Void functionality + """ + pass + + def clear_result_set(self): + """Clear unused result set on this connection. + """ + result = self._chandle.useResult(self._conn)[0] + if result: + self._chandle.freeResult(result) + +if __name__ == "__main__": + conn = TDengineConnection(host='192.168.1.107') + conn.close() print("Hello world") \ No newline at end of file diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py new file mode 100644 index 0000000000..d3cf10d5ad --- /dev/null +++ b/src/connector/python/windows/python3/taos/subscription.py @@ -0,0 +1,52 @@ +from .cinterface import CTaosInterface +from .error import * + +class TDengineSubscription(object): + """TDengine subscription object + """ + def __init__(self, sub): + self._sub = sub + + + def consume(self): + """Consume rows of a subscription + """ + if self._sub is None: + raise OperationalError("Invalid use of consume") + + result, fields = CTaosInterface.consume(self._sub) + buffer = [[] for i in range(len(fields))] + while True: + block, num_of_fields = CTaosInterface.fetchBlock(result, fields) + if num_of_fields == 0: break + for i in range(len(fields)): + buffer[i].extend(block[i]) + + self.fields = fields + return list(map(tuple, zip(*buffer))) + + + def close(self, keepProgress = True): + """Close the Subscription. 
+ """ + if self._sub is None: + return False + + CTaosInterface.unsubscribe(self._sub, keepProgress) + return True + + +if __name__ == '__main__': + from .connection import TDengineConnection + conn = TDengineConnection(host="127.0.0.1", user="root", password="taosdata", database="test") + + # Generate a cursor object to run SQL commands + sub = conn.subscribe(True, "test", "select * from meters;", 1000) + + for i in range(0,10): + data = sub.consume() + for d in data: + print(d) + + sub.close() + conn.close() \ No newline at end of file diff --git a/src/inc/taos.h b/src/inc/taos.h index c56d0e86d7..d9db79fbcb 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -116,11 +116,10 @@ DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); -DLL_EXPORT TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *table, int64_t time, int mseconds); -DLL_EXPORT TAOS_ROW taos_consume(TAOS_SUB *tsub); -DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub); -DLL_EXPORT int taos_subfields_count(TAOS_SUB *tsub); -DLL_EXPORT TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub); +typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code); +DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval); +DLL_EXPORT TAOS_RES *taos_consume(TAOS_SUB *tsub); +DLL_EXPORT void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress); DLL_EXPORT TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)); diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 22b10eaa60..9151851330 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -490,6 +490,7 @@ typedef struct SColumnInfo { typedef struct SMeterSidExtInfo { int32_t sid; int64_t uid; + TSKEY key; // key for subscription char tags[]; } SMeterSidExtInfo; diff --git a/src/inc/tglobalcfg.h b/src/inc/tglobalcfg.h index d4c998af31..7603591e06 100644 --- a/src/inc/tglobalcfg.h +++ b/src/inc/tglobalcfg.h @@ -54,6 +54,7 @@ extern char tsDirectory[]; extern char dataDir[]; extern char logDir[]; extern char scriptDir[]; +extern char osName[]; extern char tsMasterIp[]; extern char tsSecondIp[]; @@ -78,7 +79,6 @@ extern char tsPrivateIp[]; extern short tsNumOfVnodesPerCore; extern short tsNumOfTotalVnodes; extern short tsCheckHeaderFile; -extern uint32_t tsServerIp; extern uint32_t tsPublicIpInt; extern short tsAffectedRowsMod; diff --git a/src/inc/ttime.h b/src/inc/ttime.h index eae24a56b5..34c241cbc0 100644 --- a/src/inc/ttime.h +++ b/src/inc/ttime.h @@ -42,6 +42,7 @@ int64_t taosGetTimestamp(int32_t precision); int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts); int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec); +void deltaToUtcInitOnce(); #ifdef __cplusplus } diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index 7442367e91..244eafb44f 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -9,6 +9,7 @@ INCLUDE_DIRECTORIES(inc) IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM)) AUX_SOURCE_DIRECTORY(./src SRC) LIST(REMOVE_ITEM SRC ./src/shellWindows.c) + 
LIST(REMOVE_ITEM SRC ./src/shellDarwin.c)
 ADD_EXECUTABLE(shell ${SRC})
 TARGET_LINK_LIBRARIES(shell taos_static)
 SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
@@ -24,7 +25,9 @@ ELSEIF (TD_WINDOWS_64)
 ELSEIF (TD_DARWIN_64)
 LIST(APPEND SRC ./src/shellEngine.c)
 LIST(APPEND SRC ./src/shellMain.c)
- LIST(APPEND SRC ./src/shellWindows.c)
+ LIST(APPEND SRC ./src/shellDarwin.c)
+ LIST(APPEND SRC ./src/shellCommand.c)
+ LIST(APPEND SRC ./src/shellImport.c)
 ADD_EXECUTABLE(shell ${SRC})
 TARGET_LINK_LIBRARIES(shell taos_static)
 SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c
new file mode 100644
index 0000000000..b624f5ee68
--- /dev/null
+++ b/src/kit/shell/src/shellDarwin.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define __USE_XOPEN
+
+#include "os.h"
+
+#include "shell.h"
+#include "shellCommand.h"
+#include "tkey.h"
+
+#define OPT_ABORT 1 /* --abort */
+
+int indicator = 1;
+struct termios oldtio;
+
+extern int wcwidth(wchar_t c);
+void insertChar(Command *cmd, char *c, int size);
+
+
+void printHelp() {
+ char indent[10] = " ";
+ printf("taos shell is used to test the TDEngine database\n");
+
+ printf("%s%s\n", indent, "-h");
+ printf("%s%s%s\n", indent, indent, "TDEngine server IP address to connect.
The default host is localhost."); + printf("%s%s\n", indent, "-p"); + printf("%s%s%s\n", indent, indent, "The password to use when connecting to the server."); + printf("%s%s\n", indent, "-P"); + printf("%s%s%s\n", indent, indent, "The TCP/IP port number to use for the connection"); + printf("%s%s\n", indent, "-u"); + printf("%s%s%s\n", indent, indent, "The TDEngine user name to use when connecting to the server."); + printf("%s%s\n", indent, "-c"); + printf("%s%s%s\n", indent, indent, "Configuration directory."); + printf("%s%s\n", indent, "-s"); + printf("%s%s%s\n", indent, indent, "Commands to run without enter the shell."); + printf("%s%s\n", indent, "-r"); + printf("%s%s%s\n", indent, indent, "Output time as unsigned long.."); + printf("%s%s\n", indent, "-f"); + printf("%s%s%s\n", indent, indent, "Script to run without enter the shell."); + printf("%s%s\n", indent, "-d"); + printf("%s%s%s\n", indent, indent, "Database to use when connecting to the server."); + printf("%s%s\n", indent, "-t"); + printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local."); + printf("%s%s\n", indent, "-D"); + printf("%s%s%s\n", indent, indent, "Use multi-thread to import all SQL files in the directory separately."); + printf("%s%s\n", indent, "-T"); + printf("%s%s%s\n", indent, indent, "Number of threads when using multi-thread to import data."); + + exit(EXIT_SUCCESS); +} + +void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { + wordexp_t full_path; + for (int i = 1; i < argc; i++) { + // for host + if (strcmp(argv[i], "-h") == 0) { + if (i < argc - 1) { + arguments->host = argv[++i]; + } else { + fprintf(stderr, "option -h requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for password + else if (strcmp(argv[i], "-p") == 0) { + arguments->is_use_passwd = true; + } + // for management port + else if (strcmp(argv[i], "-P") == 0) { + if (i < argc - 1) { + tsMgmtShellPort = atoi(argv[++i]); + } else { + fprintf(stderr, "option -P requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for user + else if (strcmp(argv[i], "-u") == 0) { + if (i < argc - 1) { + arguments->user = argv[++i]; + } else { + fprintf(stderr, "option -u requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-c") == 0) { + if (i < argc - 1) { + strcpy(configDir, argv[++i]); + } else { + fprintf(stderr, "Option -c requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-s") == 0) { + if (i < argc - 1) { + arguments->commands = argv[++i]; + } else { + fprintf(stderr, "option -s requires an argument\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-r") == 0) { + arguments->is_raw_time = true; + } + // For temperory batch commands to run TODO + else if (strcmp(argv[i], "-f") == 0) { + if (i < argc - 1) { + strcpy(arguments->file, argv[++i]); + } else { + fprintf(stderr, "option -f requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // for default database + else if (strcmp(argv[i], "-d") == 0) { + if (i < argc - 1) { + arguments->database = argv[++i]; + } else { + fprintf(stderr, "option -d requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For time zone + else if (strcmp(argv[i], "-t") == 0) { + if (i < argc - 1) { + arguments->timezone = argv[++i]; + } else { + fprintf(stderr, "option -t requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For import directory + else if (strcmp(argv[i], "-D") == 0) { + if (i < argc - 1) { + if (wordexp(argv[++i], &full_path, 0) != 0) { + fprintf(stderr, 
"Invalid path %s\n", argv[i]); + exit(EXIT_FAILURE); + } + strcpy(arguments->dir, full_path.we_wordv[0]); + wordfree(&full_path); + } else { + fprintf(stderr, "option -D requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For time zone + else if (strcmp(argv[i], "-T") == 0) { + if (i < argc - 1) { + arguments->threadNum = atoi(argv[++i]); + } else { + fprintf(stderr, "option -T requires an argument\n"); + exit(EXIT_FAILURE); + } + } + // For temperory command TODO + else if (strcmp(argv[i], "--help") == 0) { + printHelp(); + exit(EXIT_FAILURE); + } else { + fprintf(stderr, "wrong options\n"); + printHelp(); + exit(EXIT_FAILURE); + } + } +} + +void shellReadCommand(TAOS *con, char *command) { + unsigned hist_counter = history.hend; + char utf8_array[10] = "\0"; + Command cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.buffer = (char *)calloc(1, MAX_COMMAND_SIZE); + cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE); + showOnScreen(&cmd); + + // Read input. + char c; + while (1) { + c = getchar(); + + if (c < 0) { // For UTF-8 + int count = countPrefixOnes(c); + utf8_array[0] = c; + for (int k = 1; k < count; k++) { + c = getchar(); + utf8_array[k] = c; + } + insertChar(&cmd, utf8_array, count); + } else if (c < '\033') { + // Ctrl keys. TODO: Implement ctrl combinations + switch (c) { + case 1: // ctrl A + positionCursorHome(&cmd); + break; + case 3: + printf("\n"); + resetCommand(&cmd, ""); + kill(0, SIGINT); + break; + case 4: // EOF or Ctrl+D + printf("\n"); + taos_close(con); + // write the history + write_history(); + exitShell(); + break; + case 5: // ctrl E + positionCursorEnd(&cmd); + break; + case 8: + backspaceChar(&cmd); + break; + case '\n': + case '\r': + printf("\n"); + if (isReadyGo(&cmd)) { + sprintf(command, "%s%s", cmd.buffer, cmd.command); + tfree(cmd.buffer); + tfree(cmd.command); + return; + } else { + updateBuffer(&cmd); + } + break; + case 12: // Ctrl + L; + system("clear"); + showOnScreen(&cmd); + break; + } + } else if (c == '\033') { + c = getchar(); + switch (c) { + case '[': + c = getchar(); + switch (c) { + case 'A': // Up arrow + if (hist_counter != history.hstart) { + hist_counter = (hist_counter + MAX_HISTORY_SIZE - 1) % MAX_HISTORY_SIZE; + resetCommand(&cmd, (history.hist[hist_counter] == NULL) ? "" : history.hist[hist_counter]); + } + break; + case 'B': // Down arrow + if (hist_counter != history.hend) { + int next_hist = (hist_counter + 1) % MAX_HISTORY_SIZE; + + if (next_hist != history.hend) { + resetCommand(&cmd, (history.hist[next_hist] == NULL) ? 
"" : history.hist[next_hist]); + } else { + resetCommand(&cmd, ""); + } + hist_counter = next_hist; + } + break; + case 'C': // Right arrow + moveCursorRight(&cmd); + break; + case 'D': // Left arrow + moveCursorLeft(&cmd); + break; + case '1': + if ((c = getchar()) == '~') { + // Home key + positionCursorHome(&cmd); + } + break; + case '2': + if ((c = getchar()) == '~') { + // Insert key + } + break; + case '3': + if ((c = getchar()) == '~') { + // Delete key + deleteChar(&cmd); + } + break; + case '4': + if ((c = getchar()) == '~') { + // End key + positionCursorEnd(&cmd); + } + break; + case '5': + if ((c = getchar()) == '~') { + // Page up key + } + break; + case '6': + if ((c = getchar()) == '~') { + // Page down key + } + break; + case 72: + // Home key + positionCursorHome(&cmd); + break; + case 70: + // End key + positionCursorEnd(&cmd); + break; + } + break; + } + } else if (c == 0x7f) { + // press delete key + backspaceChar(&cmd); + } else { + insertChar(&cmd, &c, 1); + } + } +} + +void *shellLoopQuery(void *arg) { + if (indicator) { + get_old_terminal_mode(&oldtio); + indicator = 0; + } + + TAOS *con = (TAOS *)arg; + + pthread_cleanup_push(cleanup_handler, NULL); + + char *command = malloc(MAX_COMMAND_SIZE); + if (command == NULL){ + tscError("failed to malloc command"); + return NULL; + } + while (1) { + // Read command from shell. + + memset(command, 0, MAX_COMMAND_SIZE); + set_terminal_mode(); + shellReadCommand(con, command); + reset_terminal_mode(); + + // Run the command + shellRunCommand(con, command); + } + + pthread_cleanup_pop(1); + + return NULL; +} + +void shellPrintNChar(char *str, int width, bool printMode) { + int col_left = width; + wchar_t wc; + while (col_left > 0) { + if (*str == '\0') break; + char *tstr = str; + int byte_width = mbtowc(&wc, tstr, MB_CUR_MAX); + if (byte_width <= 0) break; + int col_width = wcwidth(wc); + if (col_width <= 0) { + str += byte_width; + continue; + } + if (col_left < col_width) break; + printf("%lc", wc); + str += byte_width; + col_left -= col_width; + } + + while (col_left > 0) { + printf(" "); + col_left--; + } + + if (!printMode) { + printf("|"); + } else { + printf("\n"); + } +} + +int get_old_terminal_mode(struct termios *tio) { + /* Make sure stdin is a terminal. */ + if (!isatty(STDIN_FILENO)) { + return -1; + } + + // Get the parameter of current terminal + if (tcgetattr(0, &oldtio) != 0) { + return -1; + } + + return 1; +} + +void reset_terminal_mode() { + if (tcsetattr(0, TCSANOW, &oldtio) != 0) { + fprintf(stderr, "Fail to reset the terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void set_terminal_mode() { + struct termios newtio; + + /* if (atexit(reset_terminal_mode) != 0) { */ + /* fprintf(stderr, "Error register exit function!\n"); */ + /* exit(EXIT_FAILURE); */ + /* } */ + + memcpy(&newtio, &oldtio, sizeof(oldtio)); + + // Set new terminal attributes. 
+ newtio.c_iflag &= ~(IXON | IXOFF | ICRNL | INLCR | IGNCR | IMAXBEL | ISTRIP); + newtio.c_iflag |= IGNBRK; + + // newtio.c_oflag &= ~(OPOST|ONLCR|OCRNL|ONLRET); + newtio.c_oflag |= OPOST; + newtio.c_oflag |= ONLCR; + newtio.c_oflag &= ~(OCRNL | ONLRET); + + newtio.c_lflag &= ~(IEXTEN | ICANON | ECHO | ECHOE | ECHONL | ECHOCTL | ECHOPRT | ECHOKE | ISIG); + newtio.c_cc[VMIN] = 1; + newtio.c_cc[VTIME] = 0; + + if (tcsetattr(0, TCSANOW, &newtio) != 0) { + fprintf(stderr, "Fail to set terminal properties!\n"); + exit(EXIT_FAILURE); + } +} + +void get_history_path(char *history) { sprintf(history, "%s/%s", getpwuid(getuid())->pw_dir, HISTORY_FILE); } + +void clearScreen(int ecmd_pos, int cursor_pos) { + struct winsize w; + ioctl(0, TIOCGWINSZ, &w); + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + positionCursor(cursor_y, LEFT); + positionCursor(command_x - cursor_x, DOWN); + fprintf(stdout, "\033[2K"); + for (int i = 0; i < command_x; i++) { + positionCursor(1, UP); + fprintf(stdout, "\033[2K"); + } + fflush(stdout); +} + +void showOnScreen(Command *cmd) { + struct winsize w; + if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) { + fprintf(stderr, "No stream device\n"); + exit(EXIT_FAILURE); + } + + wchar_t wc; + int size = 0; + + // Print out the command. + char *total_string = malloc(MAX_COMMAND_SIZE); + memset(total_string, '\0', MAX_COMMAND_SIZE); + if (strcmp(cmd->buffer, "") == 0) { + sprintf(total_string, "%s%s", PROMPT_HEADER, cmd->command); + } else { + sprintf(total_string, "%s%s", CONTINUE_PROMPT, cmd->command); + } + + int remain_column = w.ws_col; + /* size = cmd->commandSize + prompt_size; */ + for (char *str = total_string; size < cmd->commandSize + prompt_size;) { + int ret = mbtowc(&wc, str, MB_CUR_MAX); + if (ret < 0) break; + size += ret; + /* assert(size >= 0); */ + int width = wcwidth(wc); + if (remain_column > width) { + printf("%lc", wc); + remain_column -= width; + } else { + if (remain_column == width) { + printf("%lc\n\r", wc); + remain_column = w.ws_col; + } else { + printf("\n\r%lc", wc); + remain_column = w.ws_col - width; + } + } + + str = total_string + size; + } + + free(total_string); + /* for (int i = 0; i < size; i++){ */ + /* char c = total_string[i]; */ + /* if (k % w.ws_col == 0) { */ + /* printf("%c\n\r", c); */ + /* } */ + /* else { */ + /* printf("%c", c); */ + /* } */ + /* k += 1; */ + /* } */ + + // Position the cursor + int cursor_pos = cmd->screenOffset + prompt_size; + int ecmd_pos = cmd->endOffset + prompt_size; + + int cursor_x = cursor_pos / w.ws_col; + int cursor_y = cursor_pos % w.ws_col; + // int cursor_y = cursor % w.ws_col; + int command_x = ecmd_pos / w.ws_col; + int command_y = ecmd_pos % w.ws_col; + // int command_y = (command.size() + prompt_size) % w.ws_col; + positionCursor(command_y, LEFT); + positionCursor(command_x, UP); + positionCursor(cursor_x, DOWN); + positionCursor(cursor_y, RIGHT); + fflush(stdout); +} + +void cleanup_handler(void *arg) { tcsetattr(0, TCSANOW, &oldtio); } + +void exitShell() { + tcsetattr(0, TCSANOW, &oldtio); + exit(EXIT_SUCCESS); +} diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 1ca969a468..d159109f3e 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -21,28 +21,13 @@ #include "shellCommand.h" #include "ttime.h" #include "tutil.h" +#include "taoserror.h" #include /**************** Global variables ****************/ -#ifdef WINDOWS - char 
CLIENT_VERSION[] = "Welcome to the TDengine shell from windows, client version:%s "; -#elif defined(DARWIN) - char CLIENT_VERSION[] = "Welcome to the TDengine shell from mac, client version:%s "; -#else - #ifdef CLUSTER - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, enterprise client version:%s "; - #else - char CLIENT_VERSION[] = "Welcome to the TDengine shell from linux, community client version:%s "; - #endif -#endif - -#ifdef CLUSTER - char SERVER_VERSION[] = "enterprise server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#else - char SERVER_VERSION[] = "community server version:%s\nCopyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; -#endif - +char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; @@ -54,7 +39,7 @@ History history; */ TAOS *shellInit(struct arguments *args) { printf("\n"); - printf(CLIENT_VERSION, taos_get_client_info()); + printf(CLIENT_VERSION, osName, taos_get_client_info()); fflush(stdout); // set options before initializing @@ -111,7 +96,7 @@ TAOS *shellInit(struct arguments *args) { exit(EXIT_SUCCESS); } -#ifdef LINUX +#ifndef WINDOWS if (args->dir[0] != 0) { source_dir(con, args); taos_close(con); @@ -119,8 +104,6 @@ TAOS *shellInit(struct arguments *args) { } #endif - printf(SERVER_VERSION, taos_get_server_info(con)); - return con; } @@ -817,11 +800,16 @@ void source_file(TAOS *con, char *fptr) { } void shellGetGrantInfo(void *con) { -#ifdef CLUSTER char sql[] = "show grants"; - if (taos_query(con, sql)) { - fprintf(stdout, "\n"); + int code = taos_query(con, sql); + + if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_OPS_NOT_SUPPORT) { + fprintf(stdout, "Server is Community Edition, version is %s\n\n", taos_get_server_info(con)); + } else { + fprintf(stderr, "Failed to check Server Edition, Reason:%d:%s\n\n", taos_errno(con), taos_errstr(con)); + } return; } @@ -843,18 +831,18 @@ void shellGetGrantInfo(void *con) { exit(0); } - char version[32] = {0}; + char serverVersion[32] = {0}; char expiretime[32] = {0}; char expired[32] = {0}; - memcpy(version, row[0], fields[0].bytes); + memcpy(serverVersion, row[0], fields[0].bytes); memcpy(expiretime, row[1], fields[1].bytes); memcpy(expired, row[2], fields[2].bytes); if (strcmp(expiretime, "unlimited") == 0) { - fprintf(stdout, "This is the %s version and will never expire.\n", version); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will never expire.\n", serverVersion, taos_get_server_info(con)); } else { - fprintf(stdout, "This is the %s version and will expire at %s.\n", version, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, version is %s and will expire at %s.\n", serverVersion, taos_get_server_info(con), expiretime); } taos_free_result(result); @@ -862,5 +850,4 @@ void shellGetGrantInfo(void *con) { } fprintf(stdout, "\n"); -#endif } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 1057634803..dd04f935e7 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -90,20 +90,12 @@ static void shellParseDirectory(const char *directoryName, const char *prefix, c static void shellCheckTablesSQLFile(const char *directoryName) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/tables.sql", directoryName); + sprintf(shellTablesSQLFile, "%s/tables.sql", 
directoryName); - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); + struct stat fstat; + if (stat(shellTablesSQLFile, &fstat) < 0) { + shellTablesSQLFile[0] = 0; } - - while (fscanf(fp, "%s", shellTablesSQLFile)) { - break; - } - - pclose(fp); } static void shellMallocSQLFiles() diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index bcb9c7b4e7..081b9eae31 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -119,13 +119,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { static struct argp argp = {options, parse_opt, args_doc, doc}; void shellParseArgument(int argc, char *argv[], struct arguments *arguments) { - char verType[32] = {0}; - #ifdef CLUSTER - sprintf(verType, "enterprise version: %s\n", version); - #else - sprintf(verType, "community version: %s\n", version); - #endif - + static char verType[32] = {0}; + sprintf(verType, "version: %s\n", version); + argp_program_version = verType; argp_parse(&argp, argc, argv, 0, 0, arguments); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6eb8426e58..24855ab8b5 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -17,6 +17,7 @@ #include #include +#include #ifndef _ALPINE #include @@ -575,7 +576,7 @@ void *readTable(void *sarg) { double totalT = 0; int count = 0; for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %ld", aggreFunc[j], tb_prefix, i, sTime); + sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); double t = getCurrentTime(); if (taos_query(taos, command) != 0) { @@ -818,7 +819,7 @@ double getCurrentTime() { void generateData(char *res, char **data_type, int num_of_cols, int64_t timestamp, int len_of_binary) { memset(res, 0, MAX_DATA_SIZE); char *pstr = res; - pstr += sprintf(pstr, "(%ld", timestamp); + pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; for (; c < MAX_NUM_DATATYPE; c++) { @@ -835,7 +836,7 @@ void generateData(char *res, char **data_type, int num_of_cols, int64_t timestam } else if (strcasecmp(data_type[i % c], "int") == 0) { pstr += sprintf(pstr, ", %d", (int)(rand() % 10)); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { - pstr += sprintf(pstr, ", %ld", rand() % 2147483648); + pstr += sprintf(pstr, ", %" PRId64, rand() % 2147483648); } else if (strcasecmp(data_type[i % c], "float") == 0) { pstr += sprintf(pstr, ", %10.4f", (float)(rand() / 1000)); } else if (strcasecmp(data_type[i % c], "double") == 0) { diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 07c52b912f..8cf015b342 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -890,7 +890,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { if (arguments->schemaonly) return 0; - sprintf(command, "select * from %s where _c0 >= %ld and _c0 <= %ld order by _c0 asc", tbname, arguments->start_time, + sprintf(command, "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time, arguments->end_time); if (taos_query(taos, command) != 0) { fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(taos)); @@ -944,13 +944,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "%d", *((int *)row[col])); break; case TSDB_DATA_TYPE_BIGINT: - 
pstr += sprintf(pstr, "%ld", *((int64_t *)row[col])); + pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col])); break; case TSDB_DATA_TYPE_FLOAT: - pstr += sprintf(pstr, "%f", *((float *)row[col])); + pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col])); break; case TSDB_DATA_TYPE_DOUBLE: - pstr += sprintf(pstr, "%f", *((double *)row[col])); + pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col])); break; case TSDB_DATA_TYPE_BINARY: *(pstr++) = '\''; @@ -963,7 +963,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments) { pstr += sprintf(pstr, "\'%s\'", tbuf); break; case TSDB_DATA_TYPE_TIMESTAMP: - pstr += sprintf(pstr, "%ld", *(int64_t *)row[col]); + pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]); break; default: break; @@ -1145,7 +1145,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu: %ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); pstr = command; @@ -1193,7 +1193,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(tcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason: %s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command, taos_errstr(taos)); } @@ -1216,7 +1216,7 @@ int taosDumpIn(struct arguments *arguments) { } taosReplaceCtrlChar(lcommand); if (taos_query(taos, tcommand) != 0) - fprintf(stderr, "linenu:%ld failed to run command %s reason:%s \ncontinue...\n", linenu, command, + fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command, taos_errstr(taos)); } diff --git a/src/modules/http/src/httpJson.c b/src/modules/http/src/httpJson.c index ca88de59e6..5d5d29f4e0 100644 --- a/src/modules/http/src/httpJson.c +++ b/src/modules/http/src/httpJson.c @@ -119,7 +119,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { return 0; // there is no data to dump. 
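Throughout this patch, `%ld`/`%lld` format specifiers are replaced with the `<inttypes.h>` macros. A standalone sketch, not part of the patch, of why `PRId64`/`PRIu64` are the portable choice for 64-bit values:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t  ts  = 1546300800000LL;          // sample millisecond timestamp
  uint64_t uid = 18446744073709551615ULL;  // largest uint64_t value

  // "%ld" assumes long is 64-bit (true on LP64 Linux, false on LLP64 Windows),
  // while PRId64/PRIu64 expand to the correct specifier on every platform.
  printf("ts:%" PRId64 " uid:%" PRIu64 "\n", ts, uid);
  return 0;
}
```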
} else { int len = sprintf(sLen, "%lx\r\n", srcLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen); @@ -131,7 +131,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { if (ret == 0) { if (compressBufLen > 0) { int len = sprintf(sLen, "%x\r\n", compressBufLen); - httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%ld, compressSize:%d, last:%d, response:\n%s", + httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s", buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen); @@ -257,7 +257,7 @@ void httpJsonStringForTransMean(JsonBuf* buf, char* sVal, int maxLen) { void httpJsonInt64(JsonBuf* buf, int64_t num) { httpJsonItemToken(buf); httpJsonTestBuf(buf, MAX_NUM_STR_SZ); - buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%ld", num); + buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRId64, num); } void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) { diff --git a/src/modules/http/src/restJson.c b/src/modules/http/src/restJson.c index 6c04d39f45..7e98472d53 100644 --- a/src/modules/http/src/restJson.c +++ b/src/modules/http/src/restJson.c @@ -152,7 +152,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, } if (cmd->numOfRows >= tsRestRowLimit) { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%lld larger than limit:%d, abort retrieve", pContext, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows, tsRestRowLimit); return false; } @@ -163,7 +163,7 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, return false; } else { - httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%lld retrieved", pContext, pContext->fd, pContext->ipstr, + httpTrace("context:%p, fd:%d, ip:%s, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->ipstr, pContext->user, cmd->numOfRows); return true; } diff --git a/src/modules/http/src/tgHandle.c b/src/modules/http/src/tgHandle.c index cec1e40c4c..b9adf54162 100644 --- a/src/modules/http/src/tgHandle.c +++ b/src/modules/http/src/tgHandle.c @@ -572,7 +572,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { orderTagsLen = orderTagsLen < TSDB_MAX_TAGS ? 
orderTagsLen : TSDB_MAX_TAGS; table_cmd->tagNum = stable_cmd->tagNum = (int8_t)orderTagsLen; - table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%ld", timestamp->valueint); + table_cmd->timestamp = stable_cmd->timestamp = httpAddToSqlCmdBuffer(pContext, "%" PRId64, timestamp->valueint); // stable name char *stname = tgGetStableName(name->valuestring, fields, fieldsSize); @@ -593,7 +593,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); else if (tag->type == cJSON_Number) - stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%ld", tag->valueint); + stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "%" PRId64, tag->valueint); else if (tag->type == cJSON_True) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "1"); else if (tag->type == cJSON_False) @@ -614,7 +614,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "_%s", tag->valuestring); else if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "_%ld", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "_%" PRId64, tag->valueint); else if (tag->type == cJSON_False) httpAddToSqlCmdBufferNoTerminal(pContext, "_0"); else if (tag->type == cJSON_True) @@ -670,7 +670,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { cJSON *tag = orderedTags[i]; if (i != orderTagsLen - 1) { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld,", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ",", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s',", tag->valuestring); else if (tag->type == cJSON_False) @@ -682,7 +682,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } else { if (tag->type == cJSON_Number) - httpAddToSqlCmdBufferNoTerminal(pContext, "%ld)", tag->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, "%" PRId64 ")", tag->valueint); else if (tag->type == cJSON_String) httpAddToSqlCmdBufferNoTerminal(pContext, "'%s')", tag->valuestring); else if (tag->type == cJSON_False) @@ -695,7 +695,7 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { } } - httpAddToSqlCmdBufferNoTerminal(pContext, " values(%ld,", timestamp->valueint); + httpAddToSqlCmdBufferNoTerminal(pContext, " values(%" PRId64 ",", timestamp->valueint); for (int i = 0; i < fieldsSize; ++i) { cJSON *field = cJSON_GetArrayItem(fields, i); if (i != fieldsSize - 1) { diff --git a/src/modules/monitor/src/monitorSystem.c b/src/modules/monitor/src/monitorSystem.c index 9d132e51ce..f403a27293 100644 --- a/src/modules/monitor/src/monitorSystem.c +++ b/src/modules/monitor/src/monitorSystem.c @@ -217,9 +217,7 @@ void monitorInitDatabaseCb(void *param, TAOS_RES *result, int code) { if (monitor->cmdIndex == MONITOR_CMD_CREATE_TB_LOG) { taosLogFp = monitorSaveLog; taosLogSqlFp = monitorExecuteSQL; -#ifdef CLUSTER taosLogAcctFp = monitorSaveAcctLog; -#endif monitorLPrint("dnode:%s is started", tsPrivateIp); } monitor->cmdIndex++; diff --git a/src/os/darwin/inc/os.h b/src/os/darwin/inc/os.h index ea7a95c4da..1aececeec9 100644 --- a/src/os/darwin/inc/os.h +++ b/src/os/darwin/inc/os.h 
@@ -1,57 +1,85 @@ /* -* Copyright (c) 2019 TAOS Data, Inc. -* -* This program is free software: you can use, redistribute, and/or modify -* it under the terms of the GNU Affero General Public License, version 3 -* or later ("AGPL"), as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, but WITHOUT -* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -* FITNESS FOR A PARTICULAR PURPOSE. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see . -*/ + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +#ifndef TDENGINE_PLATFORM_LINUX_H +#define TDENGINE_PLATFORM_LINUX_H -#ifndef TDENGINE_PLATFORM_DARWIN_H -#define TDENGINE_PLATFORM_DARWIN_H +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include +#include #include +#include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #define htobe64 htonll #define taosCloseSocket(x) \ { \ - if (FD_VALID(x)) { \ + if (FD_VALID(x)) { \ close(x); \ - x = -1; \ + x = FD_INITIALIZER; \ } \ } + #define taosWriteSocket(fd, buf, len) write(fd, buf, len) #define taosReadSocket(fd, buf, len) read(fd, buf, len) @@ -160,7 +188,7 @@ (__a < __b) ? 
__a : __b; \ }) -#define MILLISECOND_PER_SECOND (1000L) +#define MILLISECOND_PER_SECOND ((int64_t)1000L) #define tsem_t dispatch_semaphore_t @@ -197,6 +225,10 @@ bool taosSkipSocketCheck(); bool taosGetDisk(); +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count); + +void taosSetCoreDump(); + typedef int(*__compar_fn_t)(const void *, const void *); // for send function in tsocket.c @@ -219,4 +251,8 @@ typedef int(*__compar_fn_t)(const void *, const void *); #define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZ(val) __builtin_ctz(val) -#endif \ No newline at end of file +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/darwin/src/tdarwin.c b/src/os/darwin/src/tdarwin.c index 133bb4893c..af3b1bd8a5 100644 --- a/src/os/darwin/src/tdarwin.c +++ b/src/os/darwin/src/tdarwin.c @@ -33,11 +33,12 @@ #include "tsdb.h" #include "tutil.h" -char configDir[TSDB_FILENAME_LEN] = "~/TDengine/cfg"; -char tsDirectory[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char dataDir[TSDB_FILENAME_LEN] = "~/TDengine/data"; -char logDir[TSDB_FILENAME_LEN] = "~/TDengine/log"; -char scriptDir[TSDB_FILENAME_LEN] = "~/TDengine/script"; +char configDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; +char logDir[TSDB_FILENAME_LEN] = "~/TDengineLog"; +char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Darwin"; int64_t str2int64(char *str) { char *endptr = NULL; @@ -418,4 +419,43 @@ int32_t __sync_val_load_32(int32_t *ptr) { void __sync_val_restore_32(int32_t *ptr, int32_t newval) { __atomic_store_n(ptr, newval, __ATOMIC_RELEASE); -} \ No newline at end of file +} + +#define _SEND_FILE_STEP_ 1000 + +int fsendfile(FILE* out_file, FILE* in_file, int64_t* offset, int32_t count) { + fseek(in_file, (int32_t)(*offset), 0); + int writeLen = 0; + uint8_t buffer[_SEND_FILE_STEP_] = { 0 }; + + for (int len = 0; len < (count - _SEND_FILE_STEP_); len += _SEND_FILE_STEP_) { + size_t rlen = fread(buffer, 1, _SEND_FILE_STEP_, in_file); + if (rlen <= 0) { + return writeLen; + } + else if (rlen < _SEND_FILE_STEP_) { + fwrite(buffer, 1, rlen, out_file); + return (int)(writeLen + rlen); + } + else { + fwrite(buffer, 1, _SEND_FILE_STEP_, in_file); + writeLen += _SEND_FILE_STEP_; + } + } + + int remain = count - writeLen; + if (remain > 0) { + size_t rlen = fread(buffer, 1, remain, in_file); + if (rlen <= 0) { + return writeLen; + } + else { + fwrite(buffer, 1, remain, out_file); + writeLen += remain; + } + } + + return writeLen; +} + +void taosSetCoreDump() {} \ No newline at end of file diff --git a/src/os/linux/src/tlinux.c b/src/os/linux/src/tlinux.c index ccd6fc8a34..b81b98a5f7 100644 --- a/src/os/linux/src/tlinux.c +++ b/src/os/linux/src/tlinux.c @@ -39,6 +39,7 @@ char tsDirectory[TSDB_FILENAME_LEN] = "/var/lib/taos"; char dataDir[TSDB_FILENAME_LEN] = "/var/lib/taos"; char logDir[TSDB_FILENAME_LEN] = "/var/log/taos"; char scriptDir[TSDB_FILENAME_LEN] = "/etc/taos"; +char osName[] = "Linux"; int64_t str2int64(char *str) { char *endptr = NULL; diff --git a/src/os/linux/src/tsystem.c b/src/os/linux/src/tsystem.c index c3b8b41c9d..8cd0e69436 100644 --- a/src/os/linux/src/tsystem.c +++ b/src/os/linux/src/tsystem.c @@ -517,7 +517,8 @@ bool taosGetProcIO(float *readKB, float *writeKB) { static int64_t lastReadbyte = -1; static int64_t lastWritebyte = -1; - int64_t curReadbyte, curWritebyte; + int64_t curReadbyte = 0; + int64_t curWritebyte = 0; if (!taosReadProcIO(&curReadbyte, &curWritebyte)) { 
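The `fsendfile` added for the Darwin port emulates `sendfile` with buffered stdio. A condensed, independent sketch of the same chunked seek/read/write pattern, using hypothetical names and simplified error handling:

```c
#include <stdint.h>
#include <stdio.h>

/* Copy up to `count` bytes from `in` (starting at *offset) into `out`.
   Returns the number of bytes written. */
static int copy_range(FILE *out, FILE *in, int64_t *offset, int32_t count) {
  enum { CHUNK = 1000 };
  uint8_t buf[CHUNK];
  int written = 0;

  if (fseek(in, (long)*offset, SEEK_SET) != 0) return 0;

  while (written < count) {
    size_t want = (size_t)(count - written);
    if (want > CHUNK) want = CHUNK;
    size_t rlen = fread(buf, 1, want, in);
    if (rlen == 0) break;                       /* EOF or read error */
    written += (int)fwrite(buf, 1, rlen, out);  /* write what was just read */
    if (rlen < want) break;                     /* short read: stop early */
  }
  return written;
}

int main(void) {
  /* Hypothetical file names, purely for demonstration. */
  FILE *in  = fopen("source.bin", "rb");
  FILE *out = fopen("dest.bin", "wb");
  if (in == NULL || out == NULL) return 1;

  int64_t offset = 0;
  printf("copied %d bytes\n", copy_range(out, in, &offset, 4096));

  fclose(in);
  fclose(out);
  return 0;
}
```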
return false; diff --git a/src/os/windows/src/twindows.c b/src/os/windows/src/twindows.c index b29919b969..30973165df 100644 --- a/src/os/windows/src/twindows.c +++ b/src/os/windows/src/twindows.c @@ -37,6 +37,7 @@ char tsDirectory[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char logDir[TSDB_FILENAME_LEN] = "C:/TDengine/log"; char dataDir[TSDB_FILENAME_LEN] = "C:/TDengine/data"; char scriptDir[TSDB_FILENAME_LEN] = "C:/TDengine/script"; +char osName[] = "Windows"; bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c index 87f7e2e266..9e0b5dab0f 100755 --- a/src/rpc/src/trpc.c +++ b/src/rpc/src/trpc.c @@ -169,7 +169,7 @@ static int32_t taosCompressRpcMsg(char* pCont, int32_t contLen) { memcpy(pCont + overhead, buf, compLen); pHeader->comp = 1; - tTrace("compress rpc msg, before:%lld, after:%lld", contLen, compLen); + tTrace("compress rpc msg, before:%d, after:%d", contLen, compLen); finalLen = compLen + overhead; //tDump(pCont, contLen); diff --git a/src/rpc/src/tudp.c b/src/rpc/src/tudp.c index fb0b37d93b..db3e5e81c4 100644 --- a/src/rpc/src/tudp.c +++ b/src/rpc/src/tudp.c @@ -296,7 +296,7 @@ void *taosTransferDataViaTcp(void *argv) { } if (!taosCheckHandleViaTcpValid(&handleViaTcp)) { - tError("%s UDP server read handle via tcp invalid, handle:%ld, hash:%ld", pSet->label, handleViaTcp.handle, + tError("%s UDP server read handle via tcp invalid, handle:%" PRIu64 ", hash:%" PRIu64, pSet->label, handleViaTcp.handle, handleViaTcp.hash); taosCloseSocket(connFd); free(pTransfer); @@ -698,12 +698,17 @@ int taosSendPacketViaTcp(uint32_t ip, uint16_t port, char *data, int dataLen, vo // send a UDP header first to set up the connection pHead = (STaosHeader *)buffer; memcpy(pHead, data, sizeof(STaosHeader)); + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wbitfield-constant-conversion" pHead->tcp = 2; +#pragma GCC diagnostic pop + msgLen = sizeof(STaosHeader); pHead->msgLen = (int32_t)htonl(msgLen); code = taosSendUdpData(ip, port, buffer, msgLen, chandle); - pHead = (STaosHeader *)data; + //pHead = (STaosHeader *)data; tinet_ntoa(ipstr, ip); int fd = taosOpenTcpClientSocket(ipstr, pConn->port, tsLocalIp); diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 9a5e6413f3..77b6f6d958 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -24,6 +24,8 @@ extern char version[]; const int16_t sdbFileVersion = 0; int sdbExtConns = 0; +SIpList *pSdbIpList = NULL; +SIpList *pSdbPublicIpList = NULL; #ifdef CLUSTER int sdbMaster = 0; @@ -373,22 +375,22 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { */ pTable->id++; sdbVersion++; - sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%ld id:%d", + sdbPrint("table:%s, record:%s already exist, think it successed, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); return 0; } else { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, (char *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" PRId64 , pTable->name, (char *)row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%s sdbVersion:%" PRId64 " id:%" 
PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to insert record:%d sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%d sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to insert record sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } return -1; @@ -452,19 +454,19 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { pTable->numOfRows++; switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, (char *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; default: - sdbTrace("table:%s, a record is inserted, sdbVersion:%ld id:%ld rowSize:%d numOfRows:%d fileSize:%ld", + sdbTrace("table:%s, a record is inserted, sdbVersion:%" PRId64 " id:%" PRId64 " rowSize:%d numOfRows:%d fileSize:%" PRId64, pTable->name, sdbVersion, rowHead->id, rowHead->rowSize, pTable->numOfRows, pTable->size); break; } @@ -555,19 +557,19 @@ int sdbDeleteRow(void *handle, void *row) { sdbAddIntoUpdateList(pTable, SDB_TYPE_DELETE, pMetaRow); switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is deleted, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is deleted, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%d", pTable->name, sdbVersion, pTable->id, pTable->numOfRows); 
break; } @@ -602,19 +604,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { if (pMeta == NULL) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, (char *) row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, taosIpStr(*(int32_t *) row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, *(int32_t *) row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to update record, record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record, record is not there, sdbVersion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); break; } @@ -674,19 +676,19 @@ int sdbUpdateRow(void *handle, void *row, int updateSize, char isUpdated) { switch (pTable->keyType) { case SDB_KEYTYPE_STRING: - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, (char *)row, sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbTrace("table:%s, a record is updated:%s, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%s, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id, pTable->numOfRows); break; case SDB_KEYTYPE_AUTO: - sdbTrace("table:%s, a record is updated:%d, sdbVersion:%ld id:%ld numOfRows:%d", + sdbTrace("table:%s, a record is updated:%d, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, *(int32_t *)row, sdbVersion, pTable->id, pTable->numOfRows); break; default: - sdbTrace("table:%s, a record is updated, sdbVersion:%ld id:%ld numOfRows:%d", pTable->name, sdbVersion, + sdbTrace("table:%s, a record is updated, sdbVersion:%" PRId64 " id:%" PRId64 " numOfRows:%" PRId64, pTable->name, sdbVersion, pTable->id, pTable->numOfRows); break; } @@ -795,7 +797,7 @@ void sdbCloseTable(void *handle) { pthread_mutex_destroy(&pTable->mutex); sdbNumOfTables--; - sdbTrace("table:%s is closed, id:%ld numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); + sdbTrace("table:%s is closed, id:%" PRId64 " numOfTables:%d", pTable->name, pTable->id, sdbNumOfTables); tfree(pTable->update); tfree(pTable); @@ -899,7 +901,7 @@ void sdbResetTable(SSdbTable *pTable) { tfree(rowHead); - sdbPrint("table:%s is updated, sdbVerion:%ld id:%ld", pTable->name, sdbVersion, pTable->id); + sdbPrint("table:%s is updated, sdbVerion:%" PRId64 " id:%" PRId64, pTable->name, sdbVersion, pTable->id); } // TODO:A problem here :use snapshot file to sync another node will cause diff --git a/src/system/detail/src/dnodeMgmt.c b/src/system/detail/src/dnodeMgmt.c index 14ce95ce55..5e2b150cbb 100644 --- a/src/system/detail/src/dnodeMgmt.c +++ b/src/system/detail/src/dnodeMgmt.c @@ -153,7 +153,7 @@ 
int vnodeProcessAlterStreamRequest(char *pMsg, int msgLen, SMgmtObj *pObj) { } if (pAlter->sid >= pVnode->cfg.maxSessions || pAlter->sid < 0) { - dError("vid:%d sid:%d uid:%ld, sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); + dError("vid:%d sid:%d uid:%" PRIu64 ", sid is out of range", pAlter->vnode, pAlter->sid, pAlter->uid); code = TSDB_CODE_INVALID_TABLE_ID; goto _over; } diff --git a/src/system/detail/src/mgmtDnodeInt.c b/src/system/detail/src/mgmtDnodeInt.c index c715dd42e2..a1dae7738f 100644 --- a/src/system/detail/src/mgmtDnodeInt.c +++ b/src/system/detail/src/mgmtDnodeInt.c @@ -465,8 +465,11 @@ int mgmtCfgDynamicOptions(SDnodeObj *pDnode, char *msg) { } int mgmtSendCfgDnodeMsg(char *cont) { +#ifdef CLUSTER char * pMsg, *pStart; int msgLen = 0; +#endif + SDnodeObj *pDnode; SCfgMsg * pCfg = (SCfgMsg *)cont; uint32_t ip; diff --git a/src/system/detail/src/mgmtMeter.c b/src/system/detail/src/mgmtMeter.c index d1a43260af..a2a6ed8a7d 100644 --- a/src/system/detail/src/mgmtMeter.c +++ b/src/system/detail/src/mgmtMeter.c @@ -688,7 +688,7 @@ int mgmtCreateMeter(SDbObj *pDb, SCreateTableMsg *pCreate) { pMeter->uid = (((uint64_t)pMeter->gid.vgId) << 40) + ((((uint64_t)pMeter->gid.sid) & ((1ul << 24) - 1ul)) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); - mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%llu db:%s", + mTrace("table:%s, create table in vgroup, vgId:%d sid:%d vnode:%d uid:%" PRIu64 " db:%s", pMeter->meterId, pVgroup->vgId, sid, pVgroup->vnodeGid[0].vnode, pMeter->uid, pDb->name); } else { pMeter->uid = (((uint64_t)pMeter->createdTime) << 16) + ((uint64_t)sdbVersion & ((1ul << 16) - 1ul)); @@ -1186,6 +1186,8 @@ int mgmtRetrieveMetricMeta(SConnObj *pConn, char **pStart, SMetricMetaMsg *pMetr int32_t * tagLen = calloc(1, sizeof(int32_t) * pMetricMetaMsg->numOfMeters); if (result == NULL || tagLen == NULL) { + tfree(result); + tfree(tagLen); return -1; } diff --git a/src/system/detail/src/mgmtShell.c b/src/system/detail/src/mgmtShell.c index e26fee0f38..060084ffce 100644 --- a/src/system/detail/src/mgmtShell.c +++ b/src/system/detail/src/mgmtShell.c @@ -257,7 +257,7 @@ int mgmtProcessMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { pRsp->code = TSDB_CODE_DB_NOT_SELECTED; pMsg++; } else { - mTrace("%s, uid:%lld meter meta is retrieved", pInfo->meterId, pMeterObj->uid); + mTrace("%s, uid:%" PRIu64 " meter meta is retrieved", pInfo->meterId, pMeterObj->uid); pRsp->code = 0; pMsg += sizeof(STaosRsp); *pMsg = TSDB_IE_TYPE_META; @@ -402,7 +402,7 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pMeterObj == NULL || (pDbObj == NULL)) { continue; } else { - mTrace("%s, uid:%lld sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); + mTrace("%s, uid:%" PRIu64 " sversion:%d meter meta is retrieved", tblName, pMeterObj->uid, pMeterObj->sversion); pMeta = (SMultiMeterMeta *)pCurMeter; memcpy(pMeta->meterId, tblName, strlen(tblName)); @@ -446,7 +446,7 @@ int mgmtProcessMultiMeterMetaMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (pVgroup == NULL) { pRsp->code = TSDB_CODE_INVALID_TABLE; pNewMsg++; - mError("%s, uid:%lld sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, pMeterObj->sversion, + mError("%s, uid:%" PRIu64 " sversion:%d vgId:%d pVgroup is NULL", tblName, pMeterObj->uid, pMeterObj->sversion, pMeterObj->gid.vgId); goto _error_exit_code; } @@ -1078,7 +1078,7 @@ int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { STabObj* pMeter = 
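For context on the uid trace-format changes above: `mgmtCreateMeter` packs the vgroup id, a 24-bit session id, and the low 16 bits of `sdbVersion` into a single 64-bit uid, which is why these logs move to `PRIu64`. A self-contained illustration of that packing; the values are arbitrary examples, not taken from the patch:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t vgId = 5, sid = 123, sdbVersion = 42;  // arbitrary example values

  // Same layout as mgmtCreateMeter: vgId in the high bits,
  // 24 bits of sid, and the low 16 bits of sdbVersion.
  uint64_t uid = (vgId << 40)
               + ((sid & ((1ull << 24) - 1)) << 16)
               + (sdbVersion & ((1ull << 16) - 1));

  // Unpack the fields again to show the layout round-trips.
  printf("uid:%" PRIu64 " vgId:%" PRIu64 " sid:%" PRIu64 "\n",
         uid, uid >> 40, (uid >> 16) & ((1ull << 24) - 1));
  return 0;
}
```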
mgmtGetMeter(pCreate->meterId); assert(pMeter != NULL); - mWarn("table:%s, table already created, failed to create table, ts:%lld, code:%d", pCreate->meterId, + mWarn("table:%s, table already created, failed to create table, ts:%" PRId64 ", code:%d", pCreate->meterId, pMeter->createdTime, code); } else { // other errors mError("table:%s, failed to create table, code:%d", pCreate->meterId, code); @@ -1202,21 +1202,28 @@ int mgmtProcessHeartBeatMsg(char *cont, int contLen, SConnObj *pConn) { pConn->streamId = 0; pHBRsp->killConnection = pConn->killConnection; -#ifdef CLUSTER if (pConn->usePublicIp) { - int size = pSdbPublicIpList->numOfIps * 4; - pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; - memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); - pMsg += sizeof(SHeartBeatRsp) + size; + if (pSdbPublicIpList != NULL) { + int size = pSdbPublicIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbPublicIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbPublicIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } else { + pHBRsp->ipList.numOfIps = 0; + pMsg += sizeof(SHeartBeatRsp); + } + } else { - int size = pSdbIpList->numOfIps * 4; - pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps; - memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size); - pMsg += sizeof(SHeartBeatRsp) + size; + if (pSdbIpList != NULL) { + int size = pSdbIpList->numOfIps * 4; + pHBRsp->ipList.numOfIps = pSdbIpList->numOfIps; + memcpy(pHBRsp->ipList.ip, pSdbIpList->ip, size); + pMsg += sizeof(SHeartBeatRsp) + size; + } else { + pHBRsp->ipList.numOfIps = 0; + pMsg += sizeof(SHeartBeatRsp); + } } -#else - pMsg += sizeof(SHeartBeatRsp); -#endif msgLen = pMsg - pStart; taosSendMsgToPeer(pConn->thandle, pStart, msgLen); @@ -1334,15 +1341,22 @@ _rsp: pConnectRsp->superAuth = pConn->superAuth; pMsg += sizeof(SConnectRsp); -#ifdef CLUSTER - int size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); - if (pConn->usePublicIp) { - memcpy(pMsg, pSdbPublicIpList, size); + int size; + if (pSdbPublicIpList != NULL && pSdbIpList != NULL) { + size = pSdbPublicIpList->numOfIps * 4 + sizeof(SIpList); + if (pConn->usePublicIp) { + memcpy(pMsg, pSdbPublicIpList, size); + } else { + memcpy(pMsg, pSdbIpList, size); + } } else { - memcpy(pMsg, pSdbIpList, size); + SIpList tmpIpList; + tmpIpList.numOfIps = 0; + size = tmpIpList.numOfIps * 4 + sizeof(SIpList); + memcpy(pMsg, &tmpIpList, size); } + pMsg += size; -#endif // set the time resolution: millisecond or microsecond *((uint32_t *)pMsg) = tsTimePrecision; diff --git a/src/system/detail/src/vnodeCache.c b/src/system/detail/src/vnodeCache.c index 36bf872109..bc3a733706 100644 --- a/src/system/detail/src/vnodeCache.c +++ b/src/system/detail/src/vnodeCache.c @@ -653,8 +653,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pCacheBlock->numOfPoints; ++j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. 
slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -678,8 +678,8 @@ int vnodeQueryFromCache(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j >= 0; --j) { TSKEY key = vnodeGetTSInCacheBlock(pCacheBlock, j); if (key < startkey || key > endkey) { - dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); + dError("vid:%d sid:%d id:%s, timestamp in cache slot is disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startkey, endkey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -962,10 +962,11 @@ void vnodeSetCommitQuery(SMeterObj *pObj, SQuery *pQuery) { if (firstKey < pQuery->skey) { pQuery->over = 1; - dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%ld commitFirstKey:%ld", + dTrace("vid:%d sid:%d id:%s, first key is small, keyFirst:%" PRId64 " commitFirstKey:%" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, firstKey, pQuery->skey); pthread_mutex_lock(&(pVnode->vmutex)); if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pthread_mutex_unlock(&(pVnode->vmutex)); } } @@ -1013,7 +1014,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosWriteMsg(fd, &(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, send lastKey:%lld lastKeyOnFile:%lld", vnode, sid, pObj->meterId, pObj->lastKey, + dTrace("vid:%d sid:%d id:%s, send lastKey:%" PRId64 " lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pObj->lastKey, pObj->lastKeyOnFile); slot = pInfo->commitSlot; @@ -1033,7 +1034,7 @@ int vnodeSyncRetrieveCache(int vnode, int fd) { if (taosWriteMsg(fd, pBlock->offset[col], pObj->schema[col].bytes * points) <= 0) return -1; TSKEY lastKey = *((TSKEY *)(pBlock->offset[0] + pObj->schema[0].bytes * (points - 1))); - dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is sent, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, lastKey); blocksSent++; @@ -1097,7 +1098,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (taosReadMsg(fd, &(pObj->lastKeyOnFile), sizeof(pObj->lastKeyOnFile)) <= 0) return -1; if (taosReadMsg(fd, &(pInfo->commitPoint), sizeof(pInfo->commitPoint)) <= 0) return -1; - dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%ld", vnode, sid, pObj->meterId, pInfo->commitPoint, + dTrace("vid:%d sid:%d id:%s, commitPoint:%d lastKeyOnFile:%" PRId64, vnode, sid, pObj->meterId, pInfo->commitPoint, pObj->lastKeyOnFile); if (vnodeList[pObj->vnode].lastKey < pObj->lastKey) vnodeList[pObj->vnode].lastKey = pObj->lastKey; @@ -1135,7 +1136,7 @@ int vnodeSyncRestoreCache(int vnode, int fd) { if (vnodeList[pObj->vnode].firstKey > *(TSKEY *)(pBlock->offset[0])) vnodeList[pObj->vnode].firstKey = *(TSKEY *)(pBlock->offset[0]); - dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%ld", vnode, sid, pObj->meterId, points, + dTrace("vid:%d sid:%d id:%s, cache block is received, points:%d lastKey:%" PRId64, vnode, sid, pObj->meterId, points, 
pObj->lastKey); } } diff --git a/src/system/detail/src/vnodeCommit.c b/src/system/detail/src/vnodeCommit.c index b5c9f80745..a650376afa 100644 --- a/src/system/detail/src/vnodeCommit.c +++ b/src/system/detail/src/vnodeCommit.c @@ -51,7 +51,7 @@ int vnodeOpenCommitLog(int vnode, uint64_t firstV) { int64_t length = statbuf.st_size; if (length != pVnode->mappingSize) { - dError("vid:%d, logfd:%d, alloc file size:%ld not equal to mapping size:%ld", vnode, pVnode->logFd, length, + dError("vid:%d, logfd:%d, alloc file size:%" PRId64 " not equal to mapping size:%" PRId64, vnode, pVnode->logFd, length, pVnode->mappingSize); goto _err_log_open; } diff --git a/src/system/detail/src/vnodeFile.c b/src/system/detail/src/vnodeFile.c index b8d3088fe1..1760c85322 100644 --- a/src/system/detail/src/vnodeFile.c +++ b/src/system/detail/src/vnodeFile.c @@ -197,7 +197,7 @@ int vnodeCreateNeccessaryFiles(SVnodeObj *pVnode) { numOfFiles = (pVnode->lastKeyOnFile - pVnode->commitFirstKey) / tsMsPerDay[(uint8_t)pVnode->cfg.precision] / pCfg->daysPerFile; if (pVnode->commitFirstKey > pVnode->lastKeyOnFile) numOfFiles = -1; - dTrace("vid:%d, commitFirstKey:%ld lastKeyOnFile:%ld numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, + dTrace("vid:%d, commitFirstKey:%" PRId64 " lastKeyOnFile:%" PRId64 " numOfFiles:%d fileId:%d vnodeNumOfFiles:%d", pVnode->vnode, pVnode->commitFirstKey, pVnode->lastKeyOnFile, numOfFiles, pVnode->fileId, pVnode->numOfFiles); if (numOfFiles >= pVnode->numOfFiles) { @@ -251,7 +251,7 @@ int vnodeOpenCommitFiles(SVnodeObj *pVnode, int noTempLast) { fileId = pVnode->commitFileId; - dTrace("vid:%d, commit fileId:%d, commitLastKey:%ld, vnodeLastKey:%ld, lastKeyOnFile:%ld numOfFiles:%d", + dTrace("vid:%d, commit fileId:%d, commitLastKey:%" PRId64 ", vnodeLastKey:%" PRId64 ", lastKeyOnFile:%" PRId64 " numOfFiles:%d", vnode, fileId, pVnode->commitLastKey, pVnode->lastKey, pVnode->lastKeyOnFile, pVnode->numOfFiles); int minSize = sizeof(SCompHeader) * pVnode->cfg.maxSessions + sizeof(TSCKSUM) + TSDB_FILE_HEADER_LEN; @@ -506,7 +506,7 @@ void *vnodeCommitMultiToFile(SVnodeObj *pVnode, int ssid, int esid) { SVnodeHeadInfo headInfo; uint8_t * pOldCompBlocks; - dPrint("vid:%d, committing to file, firstKey:%ld lastKey:%ld ssid:%d esid:%d", vnode, pVnode->firstKey, + dPrint("vid:%d, committing to file, firstKey:%" PRId64 " lastKey:%" PRId64 " ssid:%d esid:%d", vnode, pVnode->firstKey, pVnode->lastKey, ssid, esid); if (pVnode->lastKey == 0) goto _over; @@ -573,7 +573,7 @@ _again: memset(&query, 0, sizeof(query)); if (vnodeOpenCommitFiles(pVnode, ssid) < 0) goto _over; - dTrace("vid:%d, start to commit, commitFirstKey:%ld commitLastKey:%ld", vnode, pVnode->commitFirstKey, + dTrace("vid:%d, start to commit, commitFirstKey:%" PRId64 " commitLastKey:%" PRId64, vnode, pVnode->commitFirstKey, pVnode->commitLastKey); headLen = 0; @@ -642,7 +642,7 @@ _again: read(pVnode->hfd, &pMeter->lastBlock, sizeof(SCompBlock)); } } else { - dTrace("vid:%d sid:%d id:%s, uid:%ld is not matched w/ old:%ld, old data will be thrown away", + dTrace("vid:%d sid:%d id:%s, uid:%" PRIu64 " is not matched with old:%" PRIu64 ", old data will be thrown away", vnode, sid, pObj->meterId, pObj->uid, compInfo.uid); pMeter->oldNumOfBlocks = 0; } @@ -683,7 +683,7 @@ _again: query.sdata = data; vnodeSetCommitQuery(pObj, &query); - dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%lld slot:%d pos:%d", pObj->vnode, pObj->sid, pObj->meterId, + dTrace("vid:%d sid:%d id:%s, start to commit, startKey:%" PRId64 " slot:%d pos:%d", 
pObj->vnode, pObj->sid, pObj->meterId, pObj->lastKeyOnFile, query.slot, query.pos); pointsRead = 0; @@ -760,7 +760,7 @@ _again: pMeter->newNumOfBlocks++; pMeter->committedPoints += (pointsRead - pointsReadLast); - dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%lld, " + dTrace("vid:%d sid:%d id:%s, pointsRead:%d, pointsReadLast:%d lastKey:%" PRId64 ", " "slot:%d pos:%d newNumOfBlocks:%d headLen:%d", pObj->vnode, pObj->sid, pObj->meterId, pointsRead, pointsReadLast, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks, headLen); @@ -771,7 +771,7 @@ _again: pointsReadLast = 0; } - dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%lld slot:%d pos:%d newNumOfBlocks:%d", + dTrace("vid:%d sid:%d id:%s, %d points are committed, lastKey:%" PRId64 " slot:%d pos:%d newNumOfBlocks:%d", pObj->vnode, pObj->sid, pObj->meterId, pMeter->committedPoints, pObj->lastKeyOnFile, query.slot, query.pos, pMeter->newNumOfBlocks); @@ -1093,7 +1093,7 @@ int vnodeReadColumnToMem(int fd, SCompBlock *pBlock, SField **fields, int col, c } if (len <= 0) { - dError("failed to read col:%d, offset:%ld, reason:%s", col, tfields[col].offset, strerror(errno)); + dError("failed to read col:%d, offset:%d, reason:%s", col, (int32_t)(tfields[col].offset), strerror(errno)); return -1; } @@ -1218,7 +1218,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] int dfd = pVnode->dfd; if (pCompBlock->last && (points < pObj->pointsPerFileBlock * tsFileBlockMinPercent)) { - dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %ld, block etime: %ld", + dTrace("vid:%d sid:%d id:%s, points:%d are written to last block, block stime: %" PRId64 ", block etime: %" PRId64, pObj->vnode, pObj->sid, pObj->meterId, points, *((TSKEY *)(data[0]->data)), *((TSKEY * )(data[0]->data + (points - 1) * pObj->schema[0].bytes))); pCompBlock->last = 1; @@ -1303,7 +1303,7 @@ int vnodeWriteBlockToFile(SMeterObj *pObj, SCompBlock *pCompBlock, SData *data[] pCompBlock->len += wlen; } - dTrace("vid:%d, vnode compStorage size is: %ld", pObj->vnode, pVnode->vnodeStatistic.compStorage); + dTrace("vid:%d, vnode compStorage size is: %" PRId64, pObj->vnode, pVnode->vnodeStatistic.compStorage); pCompBlock->algorithm = pCfg->compression; pCompBlock->numOfPoints = points; @@ -1355,7 +1355,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { if (pQuery->skey < oldest) pQuery->skey = oldest; } - dTrace("vid:%d sid:%d id:%s, skey:%ld ekey:%ld oldest:%ld latest:%ld fileId:%d numOfFiles:%d", + dTrace("vid:%d sid:%d id:%s, skey:%" PRId64 " ekey:%" PRId64 " oldest:%" PRId64 " latest:%" PRId64 " fileId:%d numOfFiles:%d", pObj->vnode, pObj->sid, pObj->meterId, pQuery->skey, pQuery->ekey, oldest, latest, pVnode->fileId, pVnode->numOfFiles); @@ -1383,7 +1383,7 @@ int vnodeSearchPointInFile(SMeterObj *pObj, SQuery *pQuery) { firstSlot = 0; lastSlot = pQuery->numOfBlocks - 1; - numOfBlocks = pQuery->numOfBlocks; + //numOfBlocks = pQuery->numOfBlocks; if (QUERY_IS_ASC_QUERY(pQuery) && pBlock[lastSlot].keyLast < pQuery->skey) continue; if (!QUERY_IS_ASC_QUERY(pQuery) && pBlock[firstSlot].keyFirst > pQuery->skey) continue; @@ -1659,8 +1659,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = startPos; j < pBlock->numOfPoints; j -= step) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block disordered. 
slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } @@ -1684,8 +1684,8 @@ int vnodeQueryFromFile(SMeterObj *pObj, SQuery *pQuery) { for (int32_t j = pQuery->pos; j >= 0; --j) { TSKEY key = vnodeGetTSInDataBlock(pQuery, j, startPositionFactor); if (key < startKey || key > endKey) { - dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%lld, block " - "range:%lld-%lld", pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); + dError("vid:%d sid:%d id:%s, timestamp in file block disordered. slot:%d, pos:%d, ts:%" PRId64 ", block " + "range:%" PRId64 "-%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, pQuery->slot, j, key, startKey, endKey); tfree(ids); return -TSDB_CODE_FILE_BLOCK_TS_DISORDERED; } diff --git a/src/system/detail/src/vnodeImport.c b/src/system/detail/src/vnodeImport.c index 1e6e5e6dc9..7ebab90f0b 100644 --- a/src/system/detail/src/vnodeImport.c +++ b/src/system/detail/src/vnodeImport.c @@ -119,7 +119,7 @@ int vnodeFindKeyInCache(SImportInfo *pImport, int order) { if (pInfo->commitPoint >= pObj->pointsPerBlock) pImport->slot = (pImport->slot + 1) % pInfo->maxBlocks; pImport->pos = 0; pImport->key = 0; - dTrace("vid:%d sid:%d id:%s, key:%ld, import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); + dTrace("vid:%d sid:%d id:%s, key:%" PRId64 ", import to head of cache", pObj->vnode, pObj->sid, pObj->meterId, key); code = 0; } else { pImport->slot = query.slot; @@ -184,8 +184,8 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeGetValidDataRange(pObj->vnode, now, &minKey, &maxKey); if (firstKey < minKey || firstKey > maxKey || lastKey < minKey || lastKey > maxKey) { dError( - "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %ld lastKey: %ld minAllowedKey:%ld " - "maxAllowedKey:%ld", + "vid:%d sid:%d id:%s, invalid timestamp to import, rows:%d firstKey: %" PRId64 " lastKey: %" PRId64 " minAllowedKey:%" PRId64 " " + "maxAllowedKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, minKey, maxKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } @@ -221,7 +221,7 @@ int vnodeImportPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi SImportInfo import = {0}; - dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%ld, lastKey:%ld, object lastKey:%ld", + dTrace("vid:%d sid:%d id:%s, try to import %d rows data, firstKey:%" PRId64 ", lastKey:%" PRId64 ", object lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, rows, firstKey, lastKey, pObj->lastKey); import.firstKey = firstKey; @@ -491,7 +491,7 @@ static int vnodeLoadNeededBlockData(SMeterObj *pObj, SImportHandle *pHandle, int lseek(dfd, pBlock->offset, SEEK_SET); if (read(dfd, (void *)(pHandle->pField), pHandle->pFieldSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%ld reason:%s", pVnode->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to read data file, size:%zu reason:%s", pVnode->vnode, pObj->sid, pObj->meterId, pHandle->pFieldSize, strerror(errno)); *code = TSDB_CODE_FILE_CORRUPTED; return -1; @@ -610,7 +610,7 @@ static int vnodeCloseImportFiles(SMeterObj 
*pObj, SImportHandle *pHandle) { lseek(pVnode->nfd, 0, SEEK_END); lseek(pVnode->hfd, pHandle->nextNo0Offset, SEEK_SET); if (tsendfile(pVnode->nfd, pVnode->hfd, NULL, pHandle->hfSize - pHandle->nextNo0Offset) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%ld, reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to sendfile, size:%" PRId64 ", reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->hfSize - pHandle->nextNo0Offset, strerror(errno)); return -1; } @@ -627,7 +627,7 @@ static int vnodeCloseImportFiles(SMeterObj *pObj, SImportHandle *pHandle) { taosCalcChecksumAppend(0, (uint8_t *)(pHandle->pHeader), pHandle->pHeaderSize); lseek(pVnode->nfd, TSDB_FILE_HEADER_LEN, SEEK_SET); if (twrite(pVnode->nfd, (void *)(pHandle->pHeader), pHandle->pHeaderSize) < 0) { - dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%ld, reason:%s", pObj->vnode, pObj->sid, + dError("vid:%d sid:%d meterId:%s, failed to wirte SCompHeader part, size:%zu, reason:%s", pObj->vnode, pObj->sid, pObj->meterId, pHandle->pHeaderSize, strerror(errno)); return -1; } @@ -1528,7 +1528,7 @@ int vnodeImportDataToFiles(SImportInfo *pImport, char *payload, const int rows) assert(nrows > 0); - dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%ld lastKey:%ld", + dTrace("vid:%d sid:%d meterId:%s, %d rows of data will be imported to file %d, srow:%d firstKey:%" PRId64 " lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, nrows, fid, srow, KEY_AT_INDEX(payload, pObj->bytesPerPoint, srow), KEY_AT_INDEX(payload, pObj->bytesPerPoint, (srow + nrows - 1))); diff --git a/src/system/detail/src/vnodeMeter.c b/src/system/detail/src/vnodeMeter.c index 4dc6d78707..7c1cea1a37 100644 --- a/src/system/detail/src/vnodeMeter.c +++ b/src/system/detail/src/vnodeMeter.c @@ -353,7 +353,7 @@ int vnodeRestoreMeterObj(char *buffer, int64_t length) { // taosSetSecurityInfo(pObj->vnode, pObj->sid, pObj->meterId, pObj->spi, pObj->encrypt, pObj->secret, pObj->cipheringKey); - dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%ld", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); + dTrace("vid:%d sid:%d id:%s, meter is restored, uid:%" PRIu64 "", pObj->vnode, pObj->sid, pObj->meterId, pObj->uid); return TSDB_CODE_SUCCESS; } @@ -493,7 +493,7 @@ int vnodeCreateMeterObj(SMeterObj *pNew, SConnSec *pSec) { vnodeSaveMeterObjToFile(pNew); // vnodeCreateMeterMgmt(pNew, pSec); vnodeCreateStream(pNew); - dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%ld", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); + dTrace("vid:%d, sid:%d id:%s, meterObj is created, uid:%" PRIu64 "", pNew->vnode, pNew->sid, pNew->meterId, pNew->uid); } return code; @@ -616,7 +616,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi TSKEY minAllowedKey = (cfid - pVnode->maxFiles + 1)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision]; TSKEY maxAllowedKey = (cfid + 2)*pVnode->cfg.daysPerFile*tsMsPerDay[(uint8_t)pVnode->cfg.precision] - 2; if (firstKey < minAllowedKey || firstKey > maxAllowedKey || lastKey < minAllowedKey || lastKey > maxAllowedKey) { - dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%lld, data is out of range, numOfPoints:%d firstKey:%lld lastKey:%lld minAllowedKey:%lld maxAllowedKey:%lld", + dError("vid:%d sid:%d id:%s, vnode lastKeyOnFile:%" PRId64 ", data is out of range, numOfPoints:%d firstKey:%" PRId64 " lastKey:%" PRId64 " minAllowedKey:%" PRId64 " maxAllowedKey:%" PRId64, pObj->vnode, 
pObj->sid, pObj->meterId, pVnode->lastKeyOnFile, numOfPoints,firstKey, lastKey, minAllowedKey, maxAllowedKey); return TSDB_CODE_TIMESTAMP_OUT_OF_RANGE; } @@ -635,7 +635,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi } if (*((TSKEY *)pData) <= pObj->lastKey) { - dWarn("vid:%d sid:%d id:%s, received key:%ld not larger than lastKey:%ld", pObj->vnode, pObj->sid, pObj->meterId, + dWarn("vid:%d sid:%d id:%s, received key:%" PRId64 " not larger than lastKey:%" PRId64, pObj->vnode, pObj->sid, pObj->meterId, *((TSKEY *)pData), pObj->lastKey); pData += pObj->bytesPerPoint; continue; @@ -664,6 +664,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi if (pObj->lastKey > pVnode->lastKey) pVnode->lastKey = pObj->lastKey; if (firstKey < pVnode->firstKey) pVnode->firstKey = firstKey; + assert(pVnode->firstKey > 0); pVnode->version++; @@ -672,7 +673,7 @@ int vnodeInsertPoints(SMeterObj *pObj, char *cont, int contLen, char source, voi vnodeClearMeterState(pObj, TSDB_METER_STATE_INSERTING); _over: - dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%ld source:%d, vnode total storage: %ld", + dTrace("vid:%d sid:%d id:%s, %d out of %d points are inserted, lastKey:%" PRId64 " source:%d, vnode total storage: %" PRId64 "", pObj->vnode, pObj->sid, pObj->meterId, points, numOfPoints, pObj->lastKey, source, pVnode->vnodeStatistic.totalStorage); diff --git a/src/system/detail/src/vnodeQueryImpl.c b/src/system/detail/src/vnodeQueryImpl.c index c1653b5dad..dd618c0321 100644 --- a/src/system/detail/src/vnodeQueryImpl.c +++ b/src/system/detail/src/vnodeQueryImpl.c @@ -108,7 +108,7 @@ static FORCE_INLINE int32_t getCompHeaderStartPosition(SVnodeCfg *pCfg) { static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *pMeterObj, SCompHeader *pCompHeader, SQueryFilesInfo *pQueryFileInfo, int32_t headerSize) { if (pCompHeader->compInfoOffset < headerSize || pCompHeader->compInfoOffset > pQueryFileInfo->headerFileSize) { - dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%d is not valid, size:%ld", pQInfo, pMeterObj->vnode, + dError("QInfo:%p vid:%d sid:%d id:%s, compInfoOffset:%" PRId64 " is not valid, size:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pCompHeader->compInfoOffset, pQueryFileInfo->headerFileSize); return -1; @@ -121,7 +121,7 @@ static FORCE_INLINE int32_t validateCompBlockOffset(SQInfo *pQInfo, SMeterObj *p static FORCE_INLINE int32_t validateCompBlockInfoSegment(SQInfo *pQInfo, const char *filePath, int32_t vid, SCompInfo *compInfo, int64_t offset) { if (!taosCheckChecksumWhole((uint8_t *)compInfo, sizeof(SCompInfo))) { - dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%lld", pQInfo, vid, filePath, + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compInfo broken, offset:%" PRId64, pQInfo, vid, filePath, offset); return -1; } @@ -133,7 +133,7 @@ static FORCE_INLINE int32_t validateCompBlockSegment(SQInfo *pQInfo, const char uint32_t size = compInfo->numOfBlocks * sizeof(SCompBlock); if (checksum != taosCalcChecksum(0, (uint8_t *)pBlock, size)) { - dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%ld", pQInfo, vid, filePath, + dLError("QInfo:%p vid:%d, failed to read header file:%s, file compblock is broken:%zu", pQInfo, vid, filePath, (char *)compInfo + sizeof(SCompInfo)); return -1; } @@ -752,7 +752,7 @@ static int32_t loadColumnIntoMem(SQuery *pQuery, SQueryFilesInfo *pQueryFileInfo 
// check column data integrity if (checksum != taosCalcChecksum(0, (const uint8_t *)dst, pFields[col].len)) { - dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%ld", GET_QINFO_ADDR(pQuery), + dLError("QInfo:%p, column data checksum error, file:%s, col: %d, offset:%" PRId64, GET_QINFO_ADDR(pQuery), pQueryFileInfo->dataFilePath, col, offset); return -1; @@ -794,7 +794,7 @@ static int32_t loadDataBlockFieldsInfo(SQueryRuntimeEnv *pRuntimeEnv, SCompBlock // check fields integrity if (!taosCheckChecksumWhole((uint8_t *)(*pField), size)) { - dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%lld", pQInfo, + dLError("QInfo:%p vid:%d sid:%d id:%s, slot:%d, failed to read sfields, file:%s, sfields area broken:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pVnodeFilesInfo->dataFilePath, pBlock->offset); return -1; @@ -850,7 +850,7 @@ static int32_t loadDataBlockIntoMem(SCompBlock *pBlock, SField **pField, SQueryR if (status == DISK_BLOCK_NO_NEED_TO_LOAD) { dTrace( "QInfo:%p vid:%d sid:%d id:%s, fileId:%d, data block has been loaded, no need to load again, ts:%d, slot:%d, " - "brange:%lld-%lld, rows:%d", + "brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->fileId, loadPrimaryCol, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); @@ -1564,7 +1564,7 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { TSKEY key = *(TSKEY *)(pCtx[0].aInputElemBuf + TSDB_KEYSIZE * offset); #if defined(_DEBUG_VIEW) - printf("elem in comp ts file:%lld, key:%lld, tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", + printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%d, id:%s, query order:%d, ts order:%d, traverse:%d, index:%d\n", elem.ts, key, elem.tag, pRuntimeEnv->pMeterObj->meterId, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); #endif @@ -2829,7 +2829,7 @@ static bool getNeighborPoints(SMeterQuerySupportObj *pSupporter, SMeterObj *pMet * no previous data exists reset the status and load the data block that contains the qualified point */ if (Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK)) { - dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%lld-%lld, out of range", + dTrace("QInfo:%p no previous data block, start fileId:%d, slot:%d, pos:%d, qrange:%" PRId64 "-%" PRId64 ", out of range", GET_QINFO_ADDR(pQuery), pRuntimeEnv->startPos.fileId, pRuntimeEnv->startPos.slot, pRuntimeEnv->startPos.pos, pQuery->skey, pQuery->ekey); @@ -3255,8 +3255,8 @@ static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB static void changeExecuteScanOrder(SQuery *pQuery, bool metricQuery) { // in case of point-interpolation query, use asc order scan char msg[] = - "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%lld-%lld, " - "new qrange:%lld-%lld"; + "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 "-%" PRId64 ", " + "new qrange:%" PRId64 "-%" PRId64; // descending order query if (isFirstLastRowQuery(pQuery)) { @@ -3354,7 +3354,7 @@ static int32_t doSkipDataBlock(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? 
blockInfo.keyLast : blockInfo.keyFirst; pQuery->lastKey += step; - qTrace("QInfo:%p skip rows:%d, offset:%lld", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset); + qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pQuery), maxReads, pQuery->limit.offset); } } @@ -3720,7 +3720,7 @@ int32_t vnodeQuerySingleMeterPrepare(SQInfo *pQInfo, SMeterObj *pMeterObj, SMete if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { - dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey, pQuery->order.order); sem_post(&pQInfo->dataReady); @@ -3868,7 +3868,7 @@ void vnodeQueryFreeQInfoEx(SQInfo *pQInfo) { if (FD_VALID(pSupporter->meterOutputFd)) { assert(pSupporter->meterOutputMMapBuf != NULL); - dTrace("QInfo:%p disk-based output buffer during query:%lld bytes", pQInfo, pSupporter->bufSize); + dTrace("QInfo:%p disk-based output buffer during query:%" PRId64 " bytes", pQInfo, pSupporter->bufSize); munmap(pSupporter->meterOutputMMapBuf, pSupporter->bufSize); tclose(pSupporter->meterOutputFd); @@ -3895,7 +3895,7 @@ int32_t vnodeMultiMeterQueryPrepare(SQInfo *pQInfo, SQuery *pQuery, void *param) if ((QUERY_IS_ASC_QUERY(pQuery) && (pQuery->skey > pQuery->ekey)) || (!QUERY_IS_ASC_QUERY(pQuery) && (pQuery->ekey > pQuery->skey))) { - dTrace("QInfo:%p no result in time range %lld-%lld, order %d", pQInfo, pQuery->skey, pQuery->ekey, + dTrace("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQuery->skey, pQuery->ekey, pQuery->order.order); sem_post(&pQInfo->dataReady); @@ -4131,7 +4131,7 @@ void getQueryPositionForCacheInvalid(SQueryRuntimeEnv *pRuntimeEnv, __block_sear dTrace( "QInfo:%p vid:%d sid:%d id:%s cache block re-allocated to other meter, " - "try get query start position in file/cache, qrange:%lld-%lld, lastKey:%lld", + "try get query start position in file/cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); if (step == QUERY_DESC_FORWARD_STEP) { @@ -4396,7 +4396,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SPositionInfo *pStartPos = &pRuntimeEnv->startPos; assert(pQuery->slot == pStartPos->slot); - dTrace("QInfo:%p query start, qrange:%lld-%lld, lastkey:%lld, order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d, start fileId:%d, slot:%d, pos:%d, bstatus:%d", GET_QINFO_ADDR(pQuery), pQuery->skey, pQuery->ekey, pQuery->lastKey, pQuery->order.order, pStartPos->fileId, pStartPos->slot, pStartPos->pos, pRuntimeEnv->blockStatus); @@ -4416,7 +4416,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { doHandleCacheBlockImpl(pRuntimeEnv, &blockInfo, searchFn, &numOfRes, &forwardStep); } - dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d", + dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", fileId:%d, slot:%d, pos:%d, bstatus:%d, rows:%d, checked:%d", GET_QINFO_ADDR(pQuery), blockInfo.keyFirst, blockInfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, pRuntimeEnv->blockStatus, blockInfo.size, forwardStep); @@ -4606,27 +4606,27 @@ static void printBinaryData(int32_t functionId, char *data, 
int32_t srcDataType) if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { switch (srcDataType) { case TSDB_DATA_TYPE_BINARY: - printf("%ld,%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%s\t", *(TSKEY *)data, (data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - printf("%ld,%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int8_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_SMALLINT: - printf("%ld,%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int16_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld,%ld\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%" PRId64 "\t", *(TSKEY *)data, *(TSKEY *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_INT: - printf("%ld,%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%d\t", *(TSKEY *)data, *(int32_t *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_FLOAT: - printf("%ld,%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%f\t", *(TSKEY *)data, *(float *)(data + TSDB_KEYSIZE + 1)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%ld,%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); + printf("%" PRId64 ",%lf\t", *(TSKEY *)data, *(double *)(data + TSDB_KEYSIZE + 1)); break; } } else if (functionId == TSDB_FUNC_AVG) { @@ -4635,7 +4635,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) printf("%lf,%lf\t", *(double *)data, *(double *)(data + sizeof(double))); } else if (functionId == TSDB_FUNC_TWA) { data += 1; - printf("%lf,%ld,%ld,%ld\t", *(double *)data, *(int64_t *)(data + 8), *(int64_t *)(data + 16), + printf("%lf,%" PRId64 ",%" PRId64 ",%" PRId64 "\t", *(double *)data, *(int64_t *)(data + 8), *(int64_t *)(data + 16), *(int64_t *)(data + 24)); } else if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { switch (srcDataType) { @@ -4648,7 +4648,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int *)data); @@ -4664,7 +4664,7 @@ static void printBinaryData(int32_t functionId, char *data, int32_t srcDataType) if (srcDataType == TSDB_DATA_TYPE_FLOAT || srcDataType == TSDB_DATA_TYPE_DOUBLE) { printf("%lf\t", *(float *)data); } else { - printf("%ld\t", *(int64_t *)data); + printf("%" PRId64 "\t", *(int64_t *)data); } } else { printf("%s\t", data); @@ -4696,7 +4696,7 @@ void UNUSED_FUNC displayInterResult(SData **pdata, SQuery *pQuery, int32_t numOf } case TSDB_DATA_TYPE_TIMESTAMP: case TSDB_DATA_TYPE_BIGINT: - printf("%ld\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); + printf("%" PRId64 "\t", *(int64_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); break; case TSDB_DATA_TYPE_INT: printf("%d\t", *(int32_t *)(pdata[i]->data + pQuery->pSelectExpr[i].resBytes * j)); @@ -4962,7 +4962,7 @@ int32_t doMergeMetersResultsToGroupRes(SMeterQuerySupportObj *pSupporter, SQuery displayInterResult(pQuery->sdata, pQuery, pQuery->sdata[0]->len); #endif - dTrace("QInfo:%p result merge completed, elapsed time:%lld ms", GET_QINFO_ADDR(pQuery), endt - startt); + 
dTrace("QInfo:%p result merge completed, elapsed time:%" PRId64 " ms", GET_QINFO_ADDR(pQuery), endt - startt); tfree(pTree); tfree(pValidMeter); tfree(posArray); @@ -5941,11 +5941,11 @@ static bool setCurrentQueryRange(SMeterDataInfo *pMeterDataInfo, SQuery *pQuery, } if (*minval > *maxval) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, + qTrace("QInfo:%p vid:%d sid:%d id:%s, no result in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, pMeterQInfo->lastKey); return false; } else { - qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in files, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pMeterQInfo->skey, pMeterQInfo->ekey, pMeterQInfo->lastKey); return true; } @@ -6331,7 +6331,7 @@ void setCtxOutputPointerForSupplementScan(SMeterQuerySupportObj *pSupporter, SMe // the first column is always the timestamp for interval query TSKEY ts = *(TSKEY *)pRuntimeEnv->pCtx[0].aOutputBuf; SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%lld, index:%d", GET_QINFO_ADDR(pQuery), + qTrace("QInfo:%p vid:%d sid:%d id:%s, set output result pointer, ts:%" PRId64 ", index:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->reverseIndex); } @@ -6730,7 +6730,7 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk } if (req == BLK_DATA_NO_NEEDED) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%lld-%lld, rows:%d", + qTrace("QInfo:%p vid:%d sid:%d id:%s, slot:%d, data block ignored, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); @@ -6761,7 +6761,7 @@ int32_t LoadDatablockOnDemand(SCompBlock *pBlock, SField **pFields, uint8_t *blk dTrace("QInfo:%p fileId:%d, slot:%d, block discarded by per-filter, ", GET_QINFO_ADDR(pQuery), pQuery->fileId, pQuery->slot); #endif - qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%lld-%lld, rows:%d", + qTrace("QInfo:%p id:%s slot:%d, data block ignored by pre-filter, fields loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pQuery), pMeterObj->meterId, pQuery->slot, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfPoints); return DISK_DATA_DISCARDED; @@ -6874,7 +6874,7 @@ int32_t saveResult(SMeterQuerySupportObj *pSupporter, SMeterQueryInfo *pMeterQue TSKEY ts = *(TSKEY *)getOutputResPos(pRuntimeEnv, pData, pData->numOfElems, 0); SMeterObj *pMeterObj = pRuntimeEnv->pMeterObj; - qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%lld, total:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, + qTrace("QInfo:%p vid:%d sid:%d id:%s, save results, ts:%" PRId64 ", total:%d", GET_QINFO_ADDR(pQuery), pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, ts, pMeterQueryInfo->numOfRes + 1); pData->numOfElems += numOfResult; @@ -7190,7 +7190,7 @@ int32_t vnodeCopyQueryResultToMsg(void *handle, char *data, int32_t numOfRows) { // make sure file exist if (FD_VALID(fd)) { size_t s = lseek(fd, 0, SEEK_END); - dTrace("QInfo:%p ts comp data return, file:%s, size:%lld", pQInfo, 
pQuery->sdata[0]->data, s); + dTrace("QInfo:%p ts comp data return, file:%s, size:%zu", pQInfo, pQuery->sdata[0]->data, s); lseek(fd, 0, SEEK_SET); read(fd, data, s); diff --git a/src/system/detail/src/vnodeQueryProcess.c b/src/system/detail/src/vnodeQueryProcess.c index 8ad2c3f309..f4197ab224 100644 --- a/src/system/detail/src/vnodeQueryProcess.c +++ b/src/system/detail/src/vnodeQueryProcess.c @@ -147,8 +147,8 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { dTrace( - "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%lld-%lld, " - "lastKey:%lld", + "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan data in cache. qrange:%" PRId64 "-%" PRId64 ", " + "lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); @@ -164,7 +164,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { } } - qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%lld-%lld, lastKey:%lld", pQInfo, pMeterObj->vnode, + qTrace("QInfo:%p vid:%d sid:%d id:%s, query in cache, qrange:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); /* @@ -176,7 +176,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { */ TSKEY nextKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, true); if (nextKey < 0) { - qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%lld", pQInfo, + qTrace("QInfo:%p vid:%d sid:%d id:%s, no data qualified in cache, cache blocks:%d, lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->numOfBlocks, pQuery->lastKey); continue; } @@ -228,7 +228,7 @@ static void queryOnMultiDataCache(SQInfo *pQInfo, SMeterDataInfo *pMeterInfo) { SET_CACHE_BLOCK_FLAG(pRuntimeEnv->blockStatus); SBlockInfo binfo = getBlockBasicInfo(pBlock, BLK_CACHE_BLOCK); - dTrace("QInfo:%p check data block, brange:%lld-%lld, fileId:%d, slot:%d, pos:%d, bstatus:%d", + dTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", fileId:%d, slot:%d, pos:%d, bstatus:%d", GET_QINFO_ADDR(pQuery), binfo.keyFirst, binfo.keyLast, pQuery->fileId, pQuery->slot, pQuery->pos, pRuntimeEnv->blockStatus); @@ -376,7 +376,7 @@ static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo stimeUnit = taosGetTimestampMs(); } else if ((j % TRACE_OUTPUT_BLOCK_CNT) == 0) { etimeUnit = taosGetTimestampMs(); - dTrace("QInfo:%p load and check %ld blocks, and continue. elapsed:%ldms", pQInfo, TRACE_OUTPUT_BLOCK_CNT, + dTrace("QInfo:%p load and check %" PRId64 " blocks, and continue. elapsed:%" PRId64 " ms", pQInfo, TRACE_OUTPUT_BLOCK_CNT, etimeUnit - stimeUnit); stimeUnit = taosGetTimestampMs(); } @@ -395,8 +395,8 @@ static void queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMeterDataInfo if ((pQuery->lastKey > pQuery->ekey && QUERY_IS_ASC_QUERY(pQuery)) || (pQuery->lastKey < pQuery->ekey && !QUERY_IS_ASC_QUERY(pQuery))) { qTrace( - "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. qrange:%lld-%lld, " - "lastKey:%lld", + "QInfo:%p vid:%d sid:%d id:%s, query completed, no need to scan this data block. 
qrange:%" PRId64 "-%" PRId64 ", " + "lastKey:%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery->lastKey); @@ -479,7 +479,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool * vnodeSetTagValueInParam(pSupporter->pSidSet, pRuntimeEnv, pMeterSidExtInfo[index]); - dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%lld-%lld", pQInfo, index - start, pMeterObj->vnode, + dTrace("QInfo:%p query on (%d): vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64, pQInfo, index - start, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey); pQInfo->pObj = pMeterObj; @@ -490,7 +490,7 @@ static bool multimeterMultioutputHelper(SQInfo *pQInfo, bool *dataInDisk, bool * // data in file or cache is not qualified for the query. abort if (!(dataInCache || dataInDisk)) { - dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%lld-%lld, nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid, + dTrace("QInfo:%p vid:%d sid:%d meterId:%s, qrange:%" PRId64 "-%" PRId64 ", nores, %p", pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->skey, pQuery->ekey, pQuery); return false; } @@ -685,6 +685,11 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { return; } + TSKEY skey = pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key; + if (skey > 0) { + pQuery->skey = skey; + } + bool dataInDisk = true; bool dataInCache = true; if (!multimeterMultioutputHelper(pQInfo, &dataInDisk, &dataInCache, k, 0)) { @@ -748,6 +753,8 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { pQuery->ekey = pSupporter->rawEKey; pSupporter->meterIdx++; + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; + // if the buffer is full or group by each table, we need to jump out of the loop if (Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL) || isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)) { @@ -763,6 +770,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { assert(!Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); continue; } else { + pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey; // buffer is full, wait for the next round to retrieve data from current meter assert(Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL)); break; @@ -799,7 +807,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) { dTrace( "QInfo %p vid:%d, numOfMeters:%d, index:%d, numOfGroups:%d, %d points returned, totalRead:%d totalReturn:%d," - "next skey:%lld, offset:%lld", + "next skey:%" PRId64 ", offset:%" PRId64, pQInfo, pOneMeter->vnode, pSids->numOfSids, pSupporter->meterIdx, pSids->numOfSubSet, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->skey, pQuery->limit.offset); } @@ -909,7 +917,7 @@ static void vnodeMultiMeterQueryProcessor(SQInfo *pQInfo) { return; } - dTrace("QInfo:%p query start, qrange:%lld-%lld, order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey, + dTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", order:%d, group:%d", pQInfo, pSupporter->rawSKey, pSupporter->rawEKey, pQuery->order.order, pSupporter->pSidSet->numOfSubSet); dTrace("QInfo:%p main query scan start", pQInfo); @@ -1029,7 +1037,7 @@ static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, 
QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%lld, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->limit.offset, pQuery->lastKey, pQuery->ekey); resetCtxOutputBuf(pRuntimeEnv); @@ -1044,7 +1052,7 @@ static void vnodeSingleMeterMultiOutputProcessor(SQInfo *pQInfo) { TSKEY nextTimestamp = loadRequiredBlockIntoMem(pRuntimeEnv, &pRuntimeEnv->nextPos); assert(nextTimestamp > 0 || ((nextTimestamp < 0) && Q_STATUS_EQUAL(pQuery->over, QUERY_NO_DATA_TO_CHECK))); - dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%lld-%lld", pQInfo, + dTrace("QInfo:%p vid:%d sid:%d id:%s, query abort due to buffer limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->lastKey, pQuery->ekey); } diff --git a/src/system/detail/src/vnodeRead.c b/src/system/detail/src/vnodeRead.c index bbd3e9465c..5f77e2781b 100644 --- a/src/system/detail/src/vnodeRead.c +++ b/src/system/detail/src/vnodeRead.c @@ -581,13 +581,13 @@ void vnodeQueryData(SSchedMsg *pMsg) { pQuery->slot = -1; // reset the handle pQuery->over = 0; - dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%lld query:%p", pObj->vnode, pObj->sid, + dTrace("vid:%d sid:%d id:%s, query in other media, order:%d, skey:%" PRId64 " query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->order.order, pQuery->skey, pQuery); } pQInfo->pointsRead += pQuery->pointsRead; - dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%lld, query:%p", pObj->vnode, + dTrace("vid:%d sid:%d id:%s, %d points returned, totalRead:%d totalReturn:%d last key:%" PRId64 ", query:%p", pObj->vnode, pObj->sid, pObj->meterId, pQuery->pointsRead, pQInfo->pointsRead, pQInfo->pointsReturned, pQuery->lastKey, pQuery); @@ -630,7 +630,13 @@ void *vnodeQueryOnSingleTable(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE pQuery = &(pQInfo->query); dTrace("qmsg:%p create QInfo:%p, QInfo created", pQueryMsg, pQInfo); - pQuery->skey = pQueryMsg->skey; + SMeterSidExtInfo** pSids = (SMeterSidExtInfo**)pQueryMsg->pSidExtInfo; + if (pSids != NULL && pSids[0]->key > 0) { + pQuery->skey = pSids[0]->key; + } else { + pQuery->skey = pQueryMsg->skey; + } + pQuery->ekey = pQueryMsg->ekey; pQuery->lastKey = pQuery->skey; @@ -753,7 +759,6 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE taosAddIntHash(pSupporter->pMeterObj, pMetersObj[i]->sid, (char *)&pMetersObj[i]); } - pSupporter->pMeterSidExtInfo = (SMeterSidExtInfo **)pQueryMsg->pSidExtInfo; int32_t sidElemLen = pQueryMsg->tagLength + sizeof(SMeterSidExtInfo); int32_t size = POINTER_BYTES * pQueryMsg->numOfSids + sidElemLen * pQueryMsg->numOfSids; @@ -767,12 +772,16 @@ void *vnodeQueryOnMultiMeters(SMeterObj **pMetersObj, SSqlGroupbyExpr *pGroupbyE char *px = ((char *)pSupporter->pMeterSidExtInfo) + POINTER_BYTES * pQueryMsg->numOfSids; for (int32_t i = 0; i < pQueryMsg->numOfSids; ++i) { - pSupporter->pMeterSidExtInfo[i] = (SMeterSidExtInfo *)px; - pSupporter->pMeterSidExtInfo[i]->sid = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->sid; + SMeterSidExtInfo* pSrc = ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]; + SMeterSidExtInfo* pDst = (SMeterSidExtInfo *)px; + + pSupporter->pMeterSidExtInfo[i] = pDst; + pDst->sid = pSrc->sid; + pDst->uid = pSrc->uid; + pDst->key 
= pSrc->key; if (pQueryMsg->tagLength > 0) { - memcpy(pSupporter->pMeterSidExtInfo[i]->tags, ((SMeterSidExtInfo **)pQueryMsg->pSidExtInfo)[i]->tags, - pQueryMsg->tagLength); + memcpy(pDst->tags, pSrc->tags, pQueryMsg->tagLength); } px += sidElemLen; } @@ -923,27 +932,27 @@ int vnodeSaveQueryResult(void *handle, char *data, int32_t *size) { static int32_t validateQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { if (pQueryMsg->nAggTimeInterval < 0) { - dError("qmsg:%p illegal value of aggTimeInterval %ld", pQueryMsg, pQueryMsg->nAggTimeInterval); + dError("qmsg:%p illegal value of aggTimeInterval %" PRId64 "", pQueryMsg, pQueryMsg->nAggTimeInterval); return -1; } if (pQueryMsg->numOfTagsCols < 0 || pQueryMsg->numOfTagsCols > TSDB_MAX_TAGS + 1) { - dError("qmsg:%p illegal value of numOfTagsCols %ld", pQueryMsg, pQueryMsg->numOfTagsCols); + dError("qmsg:%p illegal value of numOfTagsCols %d", pQueryMsg, pQueryMsg->numOfTagsCols); return -1; } if (pQueryMsg->numOfCols <= 0 || pQueryMsg->numOfCols > TSDB_MAX_COLUMNS) { - dError("qmsg:%p illegal value of numOfCols %ld", pQueryMsg, pQueryMsg->numOfCols); + dError("qmsg:%p illegal value of numOfCols %d", pQueryMsg, pQueryMsg->numOfCols); return -1; } if (pQueryMsg->numOfSids <= 0) { - dError("qmsg:%p illegal value of numOfSids %ld", pQueryMsg, pQueryMsg->numOfSids); + dError("qmsg:%p illegal value of numOfSids %d", pQueryMsg, pQueryMsg->numOfSids); return -1; } if (pQueryMsg->numOfGroupCols < 0) { - dError("qmsg:%p illegal value of numOfGroupbyCols %ld", pQueryMsg, pQueryMsg->numOfGroupCols); + dError("qmsg:%p illegal value of numOfGroupbyCols %d", pQueryMsg, pQueryMsg->numOfGroupCols); return -1; } @@ -1102,11 +1111,13 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { pSids[0] = (SMeterSidExtInfo *)pMsg; pSids[0]->sid = htonl(pSids[0]->sid); pSids[0]->uid = htobe64(pSids[0]->uid); + pSids[0]->key = htobe64(pSids[0]->key); for (int32_t j = 1; j < pQueryMsg->numOfSids; ++j) { pSids[j] = (SMeterSidExtInfo *)((char *)pSids[j - 1] + sizeof(SMeterSidExtInfo) + pQueryMsg->tagLength); pSids[j]->sid = htonl(pSids[j]->sid); pSids[j]->uid = htobe64(pSids[j]->uid); + pSids[j]->key = htobe64(pSids[j]->key); } pMsg = (char *)pSids[pQueryMsg->numOfSids - 1]; @@ -1141,9 +1152,9 @@ int32_t vnodeConvertQueryMeterMsg(SQueryMeterMsg *pQueryMsg) { } } - dTrace("qmsg:%p query on %d meter(s), qrange:%lld-%lld, numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " - "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%lld, fillType:%d, comptslen:%d, limit:%lld, " - "offset:%lld", + dTrace("qmsg:%p query on %d meter(s), qrange:%" PRId64 "-%" PRId64 ", numOfGroupbyTagCols:%d, numOfTagCols:%d, timestamp order:%d, " + "tags order:%d, tags order col:%d, numOfOutputCols:%d, numOfCols:%d, interval:%" PRId64 ", fillType:%d, comptslen:%d, limit:%" PRId64 ", " + "offset:%" PRId64, pQueryMsg, pQueryMsg->numOfSids, pQueryMsg->skey, pQueryMsg->ekey, pQueryMsg->numOfGroupCols, pQueryMsg->numOfTagsCols, pQueryMsg->order, pQueryMsg->orderType, pQueryMsg->orderByIdx, pQueryMsg->numOfOutputCols, pQueryMsg->numOfCols, pQueryMsg->nAggTimeInterval, pQueryMsg->interpoType, diff --git a/src/system/detail/src/vnodeShell.c b/src/system/detail/src/vnodeShell.c index 4732867d7e..63fe24b06a 100644 --- a/src/system/detail/src/vnodeShell.c +++ b/src/system/detail/src/vnodeShell.c @@ -417,6 +417,7 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { int code = 0; pRetrieve = (SRetrieveMeterMsg *)pMsg; + SQInfo* pQInfo = (SQInfo*)pRetrieve->qhandle; 
pRetrieve->free = htons(pRetrieve->free); if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) { @@ -443,7 +444,15 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { size = vnodeGetResultSize((void *)(pRetrieve->qhandle), &numOfRows); } - pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, size + 100); + // buffer size for progress information, including meter count, + // and for each meter, including 'uid' and 'TSKEY'. + int progressSize = 0; + if (pQInfo->pMeterQuerySupporter != NULL) + progressSize = pQInfo->pMeterQuerySupporter->numOfMeters * (sizeof(int64_t) + sizeof(TSKEY)) + sizeof(int32_t); + else if (pQInfo->pObj != NULL) + progressSize = sizeof(int64_t) + sizeof(TSKEY) + sizeof(int32_t); + + pStart = taosBuildRspMsgWithSize(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, progressSize + size + 100); if (pStart == NULL) { taosSendSimpleRsp(pObj->thandle, TSDB_MSG_TYPE_RETRIEVE_RSP, TSDB_CODE_SERV_OUT_OF_MEMORY); goto _exit; @@ -473,11 +482,36 @@ void vnodeExecuteRetrieveReq(SSchedMsg *pSched) { } pMsg += size; + + // write the progress information of each meter to response + // this is required by subscriptions + if (pQInfo->pMeterQuerySupporter != NULL && pQInfo->pMeterQuerySupporter->pMeterSidExtInfo != NULL) { + *((int32_t*)pMsg) = htonl(pQInfo->pMeterQuerySupporter->numOfMeters); + pMsg += sizeof(int32_t); + for (int32_t i = 0; i < pQInfo->pMeterQuerySupporter->numOfMeters; i++) { + *((int64_t*)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->uid); + pMsg += sizeof(int64_t); + *((TSKEY*)pMsg) = htobe64(pQInfo->pMeterQuerySupporter->pMeterSidExtInfo[i]->key); + pMsg += sizeof(TSKEY); + } + } else if (pQInfo->pObj != NULL) { + *((int32_t*)pMsg) = htonl(1); + pMsg += sizeof(int32_t); + *((int64_t*)pMsg) = htobe64(pQInfo->pObj->uid); + pMsg += sizeof(int64_t); + if (pQInfo->pointsRead > 0) { + *((TSKEY*)pMsg) = htobe64(pQInfo->query.lastKey + 1); + } else { + *((TSKEY*)pMsg) = htobe64(pQInfo->query.lastKey); + } + pMsg += sizeof(TSKEY); + } + msgLen = pMsg - pStart; assert(code != TSDB_CODE_ACTION_IN_PROGRESS); - if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) { + if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS) && pRetrieve->qhandle != NULL) { dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code); vnodeDecRefCount(pObj->qhandle); pObj->qhandle = NULL; @@ -519,7 +553,7 @@ static int vnodeCheckSubmitBlockContext(SShellSubmitBlock *pBlocks, SVnodeObj *p } if (pMeterObj->uid != uid) { - dError("vid:%d sid:%d id:%s, uid:%lld, uid in msg:%lld, uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, + dError("vid:%d sid:%d id:%s, uid:%" PRIu64 ", uid in msg:%" PRIu64 ", uid mismatch", pVnode->vnode, sid, pMeterObj->meterId, pMeterObj->uid, uid); return TSDB_CODE_INVALID_SUBMIT_MSG; } diff --git a/src/system/detail/src/vnodeStore.c b/src/system/detail/src/vnodeStore.c index f184381d25..5949b1636d 100644 --- a/src/system/detail/src/vnodeStore.c +++ b/src/system/detail/src/vnodeStore.c @@ -56,7 +56,7 @@ static int vnodeInitStoreVnode(int vnode) { } pthread_mutex_init(&(pVnode->vmutex), NULL); - dPrint("vid:%d, storage initialized, version:%ld fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, + dPrint("vid:%d, storage initialized, version:%" PRIu64 " fileId:%d numOfFiles:%d", vnode, pVnode->version, pVnode->fileId, pVnode->numOfFiles); return 
TSDB_CODE_SUCCESS; diff --git a/src/system/detail/src/vnodeTagMgmt.c b/src/system/detail/src/vnodeTagMgmt.c index adf4e544bb..cea4f75f83 100644 --- a/src/system/detail/src/vnodeTagMgmt.c +++ b/src/system/detail/src/vnodeTagMgmt.c @@ -323,7 +323,7 @@ void tTagsPrints(SMeterSidExtInfo *pMeterInfo, tTagSchema *pSchema, tOrderIdx *p printf("%f, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, float)); break; case TSDB_DATA_TYPE_BIGINT: - printf("%ld, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t)); + printf("%" PRId64 ", ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int64_t)); break; case TSDB_DATA_TYPE_SMALLINT: printf("%d, ", GET_TAG_VAL(pMeterInfo, colIndex, pSchema, int16_t)); diff --git a/src/system/detail/src/vnodeUtil.c b/src/system/detail/src/vnodeUtil.c index 943bec4250..b16c5f60df 100644 --- a/src/system/detail/src/vnodeUtil.c +++ b/src/system/detail/src/vnodeUtil.c @@ -556,7 +556,7 @@ int32_t vnodeIncQueryRefCount(SQueryMeterMsg* pQueryMsg, SMeterSidExtInfo** pSid continue; } else if (pMeter->uid != pSids[i]->uid || pMeter->sid != pSids[i]->sid) { code = TSDB_CODE_TABLE_ID_MISMATCH; - dError("qmsg:%p, vid:%d sid:%d id:%s uid:%lld, id mismatch. sid:%d uid:%lld in msg", pQueryMsg, + dError("qmsg:%p, vid:%d sid:%d id:%s uid:%" PRIu64 ", id mismatch. sid:%d uid:%" PRId64 " in msg", pQueryMsg, pQueryMsg->vnode, pMeter->sid, pMeter->meterId, pMeter->uid, pSids[i]->sid, pSids[i]->uid); vnodeSendMeterCfgMsg(pQueryMsg->vnode, pSids[i]->sid); diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index f88e5d6396..c36e3f6e34 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -68,9 +68,36 @@ ELSEIF (TD_WINDOWS_64) TARGET_LINK_LIBRARIES(tutil iconv regex pthread os winmm IPHLPAPI ws2_32) ELSEIF(TD_DARWIN_64) ADD_DEFINITIONS(-DUSE_LIBICONV) - AUX_SOURCE_DIRECTORY(src SRC) - LIST(REMOVE_ITEM SRC ./src/tcrc32c.c) - LIST(REMOVE_ITEM SRC ./src/tdes.c) + LIST(APPEND SRC ./src/ihash.c) + LIST(APPEND SRC ./src/lz4.c) + LIST(APPEND SRC ./src/shash.c) + LIST(APPEND SRC ./src/tbase64.c) + LIST(APPEND SRC ./src/tcache.c) + LIST(APPEND SRC ./src/tcompression.c) + LIST(APPEND SRC ./src/textbuffer.c) + LIST(APPEND SRC ./src/tglobalcfg.c) + LIST(APPEND SRC ./src/thash.c) + LIST(APPEND SRC ./src/thashutil.c) + LIST(APPEND SRC ./src/thistogram.c) + LIST(APPEND SRC ./src/tidpool.c) + LIST(APPEND SRC ./src/tinterpolation.c) + LIST(APPEND SRC ./src/tlog.c) + LIST(APPEND SRC ./src/tlosertree.c) + LIST(APPEND SRC ./src/tmd5.c) + LIST(APPEND SRC ./src/tmem.c) + LIST(APPEND SRC ./src/tmempool.c) + LIST(APPEND SRC ./src/tmodule.c) + LIST(APPEND SRC ./src/tnote.c) + LIST(APPEND SRC ./src/tsched.c) + LIST(APPEND SRC ./src/tskiplist.c) + LIST(APPEND SRC ./src/tsocket.c) + LIST(APPEND SRC ./src/tstrbuild.c) + LIST(APPEND SRC ./src/ttime.c) + LIST(APPEND SRC ./src/ttimer.c) + LIST(APPEND SRC ./src/ttokenizer.c) + LIST(APPEND SRC ./src/ttypes.c) + LIST(APPEND SRC ./src/tutil.c) + LIST(APPEND SRC ./src/version.c) ADD_LIBRARY(tutil ${SRC}) TARGET_LINK_LIBRARIES(tutil iconv pthread os) ENDIF() diff --git a/src/util/src/tbase64.c b/src/util/src/tbase64.c index 02ec756e04..937adfde5c 100644 --- a/src/util/src/tbase64.c +++ b/src/util/src/tbase64.c @@ -98,7 +98,7 @@ unsigned char *base64_decode(const char *value, int inlen, int *outlen) { base64_decode_error: free(result); - *result = 0; + result = 0; *outlen = 0; return result; diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 8a2f1347df..e2d755ecb9 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -587,8 +587,8 @@ 
void *taosAddDataIntoCache(void *handle, char *key, char *pData, int dataSize, i pNode = taosAddToCacheImpl(pObj, key, keyLen, pData, dataSize, keepTime * 1000L); if (NULL != pNode) { pTrace( - "key:%s %p added into cache, slot:%d, addTime:%lld, expireTime:%lld, cache total:%d, " - "size:%lldbytes, collision:%d", + "key:%s %p added into cache, slot:%d, addTime:%" PRIu64 ", expireTime:%" PRIu64 ", cache total:%d, " + "size:%" PRId64 " bytes, collision:%d", pNode->key, pNode, HASH_INDEX(pNode->hashVal, pObj->capacity), pNode->addTime, pNode->time, pObj->size, pObj->totalSize, pObj->statistics.numOfCollision); } @@ -711,7 +711,7 @@ void *taosUpdateDataFromCache(void *handle, char *key, char *pData, int size, in pObj->totalSize); } else { pNew = taosUpdateCacheImpl(pObj, pNode, key, keyLen, pData, size, duration * 1000L); - pTrace("key:%s updated.expireTime:%lld.refCnt:%d", key, pNode->time, pNode->refCount); + pTrace("key:%s updated.expireTime:%" PRIu64 ".refCnt:%d", key, pNode->time, pNode->refCount); } __cache_unlock(pObj); diff --git a/src/util/src/textbuffer.c b/src/util/src/textbuffer.c index 3e71d90147..5c4ab1e74d 100644 --- a/src/util/src/textbuffer.c +++ b/src/util/src/textbuffer.c @@ -516,20 +516,20 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE if (pDesc->pSchema->numOfCols != 1 || pDesc->pSchema->colOffset[0] != 0) { pError("MemBucket:%p,only consecutive data is allowed,invalid numOfCols:%d or offset:%d", - *pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); + pBucket, pDesc->pSchema->numOfCols, pDesc->pSchema->colOffset[0]); tfree(pBucket); return NULL; } if (pDesc->pSchema->pFields[0].type != dataType) { - pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", *pBucket, + pError("MemBucket:%p,data type is not consistent,%d in schema, %d in param", pBucket, pDesc->pSchema->pFields[0].type, dataType); tfree(pBucket); return NULL; } if (pBucket->numOfTotalPages < pBucket->nTotalSlots) { - pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", *pBucket, pBucket->numOfTotalPages); + pWarn("MemBucket:%p,total buffer pages %d are not enough for all slots", pBucket, pBucket->numOfTotalPages); } pBucket->pSegs = (tMemBucketSegment *)malloc(pBucket->numOfSegs * sizeof(tMemBucketSegment)); @@ -540,7 +540,7 @@ tMemBucket* tMemBucketCreate(int32_t totalSlots, int32_t nBufferSize, int16_t nE pBucket->pSegs[i].pBoundingEntries = NULL; } - pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", *pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, + pTrace("MemBucket:%p,created,buffer size:%d,elem size:%d", pBucket, pBucket->numOfTotalPages * DEFAULT_PAGE_SIZE, pBucket->nElemSize); return pBucket; @@ -1258,6 +1258,7 @@ static tFilePage *loadIntoBucketFromDisk(tMemBucket *pMemBucket, int32_t segIdx, for (uint32_t j = 0; j < pFlushInfo->numOfPages; ++j) { ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + UNUSED(ret); assert(pPage->numOfElems > 0); tColModelAppend(pDesc->pSchema, buffer, pPage->data, 0, pPage->numOfElems, pPage->numOfElems); @@ -1917,6 +1918,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction) for (uint32_t jx = 0; jx < pFlushInfo->numOfPages; ++jx) { ret = fread(pPage, pMemBuffer->nPageSize, 1, pMemBuffer->dataFile); + UNUSED(ret); tMemBucketPut(pMemBucket, pPage->data, pPage->numOfElems); } diff --git a/src/util/src/tglobalcfg.c b/src/util/src/tglobalcfg.c index d9d5b221a5..66b0377531 100644 --- a/src/util/src/tglobalcfg.c +++ 
b/src/util/src/tglobalcfg.c @@ -56,11 +56,7 @@ int tscEmbedded = 0; */ int64_t tsMsPerDay[] = {86400000L, 86400000000L}; -#ifdef CLUSTER char tsMasterIp[TSDB_IPv4ADDR_LEN] = {0}; -#else -char tsMasterIp[TSDB_IPv4ADDR_LEN] = "127.0.0.1"; -#endif char tsSecondIp[TSDB_IPv4ADDR_LEN] = {0}; uint16_t tsMgmtShellPort = 6030; // udp[6030-6034] tcp[6030] uint16_t tsVnodeShellPort = 6035; // udp[6035-6039] tcp[6035] @@ -450,7 +446,7 @@ static void doInitGlobalConfig() { // ip address tsInitConfigOption(cfg++, "masterIp", tsMasterIp, TSDB_CFG_VTYPE_IPSTR, - TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT, 0, 0, TSDB_IPv4ADDR_LEN, TSDB_CFG_UTYPE_NONE); tsInitConfigOption(cfg++, "secondIp", tsSecondIp, TSDB_CFG_VTYPE_IPSTR, TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_CLUSTER, @@ -796,11 +792,9 @@ static void doInitGlobalConfig() { TSDB_CFG_CTYPE_B_CONFIG, 0, 1, 0, TSDB_CFG_UTYPE_NONE); -#ifdef CLUSTER tsInitConfigOption(cfg++, "anyIp", &tsAnyIp, TSDB_CFG_VTYPE_INT, - TSDB_CFG_CTYPE_B_CONFIG, + TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLUSTER, 0, 1, 0, TSDB_CFG_UTYPE_NONE); -#endif // version info tsInitConfigOption(cfg++, "gitinfo", gitinfo, TSDB_CFG_VTYPE_STRING, diff --git a/src/util/src/thistogram.c b/src/util/src/thistogram.c index a3f6e7203c..93046cf796 100644 --- a/src/util/src/thistogram.c +++ b/src/util/src/thistogram.c @@ -453,7 +453,7 @@ void tHistogramPrint(SHistogramInfo* pHisto) { for (int32_t i = 0; i < pHisto->numOfEntries; ++i) { SHistBin* pEntry = (SHistBin*)pNode->pData; - printf("%d: (%f, %lld)\n", i + 1, pEntry->val, pEntry->num); + printf("%d: (%f, %" PRId64 ")\n", i + 1, pEntry->val, pEntry->num); pNode = pNode->pForward[0]; } #endif diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index d12cc6e613..7a133590d2 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -231,8 +231,13 @@ void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) 
gettimeofday(&timeSecs, NULL); curTime = timeSecs.tv_sec; ptm = localtime_r(&curTime, &Tm); - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); - +#ifndef LINUX + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); +#else + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, + ptm->tm_sec, (int)timeSecs.tv_usec, pthread_self()); +#endif va_start(argpointer, format); len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); va_end(argpointer); diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index 0ebee5a8f4..7ab004646e 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -516,7 +516,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { int retLen = taosReadMsg(sfd, temp, (int)readLen); if (readLen != retLen) { - pError("read error, readLen:%d retLen:%d len:%ld leftLen:%ld, reason:%s", readLen, retLen, len, leftLen, + pError("read error, readLen:%d retLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, retLen, len, leftLen, strerror(errno)); return -1; } @@ -524,7 +524,7 @@ int taosCopyFds(int sfd, int dfd, int64_t len) { writeLen = taosWriteMsg(dfd, temp, readLen); if (readLen != writeLen) { - pError("copy error, readLen:%d writeLen:%d len:%ld leftLen:%ld, reason:%s", readLen, writeLen, len, leftLen, + pError("copy error, readLen:%d writeLen:%d len:%" PRId64 " leftLen:%" PRId64 ", reason:%s", readLen, writeLen, len, leftLen, strerror(errno)); return -1; } diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c index 05ba01979e..52faa0cac4 100644 --- a/src/util/src/ttime.c +++ b/src/util/src/ttime.c @@ -24,6 +24,61 @@ #include "ttime.h" #include "tutil.h" + +// ==== mktime() kernel code =================// +static int64_t m_deltaUtc = 0; +void deltaToUtcInitOnce() { + struct tm tm = {0}; + + (void)strptime("1970-01-01 00:00:00", (const char *)("%Y-%m-%d %H:%M:%S"), &tm); + m_deltaUtc = (int64_t)mktime(&tm); + //printf("====delta:%lld\n\n", seconds); + return; +} + +int64_t user_mktime(struct tm * tm) +{ +#define TAOS_MINUTE 60 +#define TAOS_HOUR (60*TAOS_MINUTE) +#define TAOS_DAY (24*TAOS_HOUR) +#define TAOS_YEAR (365*TAOS_DAY) + +static int month[12] = { + 0, + TAOS_DAY*(31), + TAOS_DAY*(31+29), + TAOS_DAY*(31+29+31), + TAOS_DAY*(31+29+31+30), + TAOS_DAY*(31+29+31+30+31), + TAOS_DAY*(31+29+31+30+31+30), + TAOS_DAY*(31+29+31+30+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31), + TAOS_DAY*(31+29+31+30+31+30+31+31+30+31+30) +}; + + int64_t res; + int year; + + year= tm->tm_year - 70; + res= TAOS_YEAR*year + TAOS_DAY*((year+1)/4); + res+= month[tm->tm_mon]; + + if(tm->tm_mon > 1 && ((year+2)%4)) { + res-= TAOS_DAY; + } + + res+= TAOS_DAY*(tm->tm_mday-1); + res+= TAOS_HOUR*tm->tm_hour; + res+= TAOS_MINUTE*tm->tm_min; + res+= tm->tm_sec; + + return res + m_deltaUtc; + +} + + static int64_t parseFraction(char* str, char** end, int32_t timePrec); static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec); static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec); @@ -237,7 +292,9 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) { } /* mktime will be affected by TZ, set by using 
diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c
index 05ba01979e..52faa0cac4 100644
--- a/src/util/src/ttime.c
+++ b/src/util/src/ttime.c
@@ -24,6 +24,61 @@
 #include "ttime.h"
 #include "tutil.h"
 
+
+// ==== mktime() kernel code =================//
+static int64_t m_deltaUtc = 0;
+void deltaToUtcInitOnce() {
+  struct tm tm = {0};
+
+  (void)strptime("1970-01-01 00:00:00", (const char *)("%Y-%m-%d %H:%M:%S"), &tm);
+  m_deltaUtc = (int64_t)mktime(&tm);
+  //printf("====delta:%lld\n\n", seconds);
+  return;
+}
+
+int64_t user_mktime(struct tm * tm)
+{
+#define TAOS_MINUTE 60
+#define TAOS_HOUR   (60*TAOS_MINUTE)
+#define TAOS_DAY    (24*TAOS_HOUR)
+#define TAOS_YEAR   (365*TAOS_DAY)
+
+static int month[12] = {
+  0,
+  TAOS_DAY*(31),
+  TAOS_DAY*(31+29),
+  TAOS_DAY*(31+29+31),
+  TAOS_DAY*(31+29+31+30),
+  TAOS_DAY*(31+29+31+30+31),
+  TAOS_DAY*(31+29+31+30+31+30),
+  TAOS_DAY*(31+29+31+30+31+30+31),
+  TAOS_DAY*(31+29+31+30+31+30+31+31),
+  TAOS_DAY*(31+29+31+30+31+30+31+31+30),
+  TAOS_DAY*(31+29+31+30+31+30+31+31+30+31),
+  TAOS_DAY*(31+29+31+30+31+30+31+31+30+31+30)
+};
+
+  int64_t res;
+  int     year;
+
+  year= tm->tm_year - 70;
+  res= TAOS_YEAR*year + TAOS_DAY*((year+1)/4);
+  res+= month[tm->tm_mon];
+
+  if(tm->tm_mon > 1 && ((year+2)%4)) {
+    res-= TAOS_DAY;
+  }
+
+  res+= TAOS_DAY*(tm->tm_mday-1);
+  res+= TAOS_HOUR*tm->tm_hour;
+  res+= TAOS_MINUTE*tm->tm_min;
+  res+= tm->tm_sec;
+
+  return res + m_deltaUtc;
+
+}
+
+
 static int64_t parseFraction(char* str, char** end, int32_t timePrec);
 static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec);
 static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec);
@@ -237,7 +292,9 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
   }
 
   /* mktime will be affected by TZ, set by using taos_options */
-  int64_t seconds = mktime(&tm);
+  //int64_t seconds = mktime(&tm);
+  int64_t seconds = (int64_t)user_mktime(&tm);
+
   int64_t fraction = 0;
 
   if (*str == '.') {
diff --git a/src/util/src/ttypes.c b/src/util/src/ttypes.c
index 5fcbe8605c..b5198468a9 100644
--- a/src/util/src/ttypes.c
+++ b/src/util/src/ttypes.c
@@ -725,7 +725,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) {
         *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
         return 0;
       } else {
-        double  value;
+        double  value = 0;
         int32_t ret;
         ret = convertToDouble(pVariant->pz, pVariant->nLen, &value);
         if ((errno == ERANGE && value == -1) || (ret != 0)) {
diff --git a/src/util/src/version.c b/src/util/src/version.c
index 8556be974c..82c85cff5c 100644
--- a/src/util/src/version.c
+++ b/src/util/src/version.c
@@ -1,5 +1,7 @@
 char version[64] = "1.6.5.3";
 char compatible_version[64] = "1.6.1.0";
-char gitinfo[128] = "2ea714387009421beb35e7f03b94c6a87d22529a";
-char gitinfoOfInternal[128] = "950f54ac026bc05bcec5cff356f4964a18d635bd";
-char buildinfo[512] = "Built by ubuntu at 2019-12-21 11:14";
+char gitinfo[128] = "751fa0239baa49c3aaa9b49e15f7812b17519800";
+char gitinfoOfInternal[128] = "";
+char buildinfo[512] = "Built by guanshengliang at 2020-01-08 15:21";
+
+void libtaos_edge_1_6_5_1_Darwin_x64() {};
diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt
index af0b8cd18d..287fca7d41 100644
--- a/tests/examples/c/CMakeLists.txt
+++ b/tests/examples/c/CMakeLists.txt
@@ -1,6 +1,6 @@
 PROJECT(TDengine)
 
-IF (TD_WINDOWS)
+IF (TD_WINDOWS_64)
   INCLUDE_DIRECTORIES(${TD_ROOT_DIR}/deps/pthread)
 ENDIF ()
diff --git a/tests/examples/c/makefile b/tests/examples/c/makefile
index ac8ff21aaf..0a4b8ee9d2 100644
--- a/tests/examples/c/makefile
+++ b/tests/examples/c/makefile
@@ -3,21 +3,23 @@ ROOT=./
 TARGET=exe
 
-LFLAGS = '-Wl,-rpath,/usr/local/taos/driver' -ltaos -lpthread -lm -lrt
-CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99
+LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
+#LFLAGS = '-Wl,-rpath,/home/zbm/project/td/debug/build/lib/' -L/home/zbm/project/td/debug/build/lib -ltaos -lpthread -lm -lrt
+CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 \
+  -I/usr/local/taos/include -std=gnu99
 
 all: $(TARGET)
 
 exe:
 	gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)/asyncdemo $(LFLAGS)
 	gcc $(CFLAGS) ./demo.c -o $(ROOT)/demo $(LFLAGS)
+	gcc $(CFLAGS) ./prepare.c -o $(ROOT)/prepare $(LFLAGS)
 	gcc $(CFLAGS) ./stream.c -o $(ROOT)/stream $(LFLAGS)
-	gcc $(CFLAGS) ./subscribe.c -o $(ROOT)/subscribe $(LFLAGS)
+	gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS)
 
 clean:
-	rm $(ROOT)asyncdemo
-	rm $(ROOT)demo
-	rm $(ROOT)stream
-	rm $(ROOT)subscribe
-
-	 
\ No newline at end of file
+	rm $(ROOT)/asyncdemo
+	rm $(ROOT)/demo
+	rm $(ROOT)/prepare
+	rm $(ROOT)/stream
+	rm $(ROOT)/subscribe
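A usage sketch for the `deltaToUtcInitOnce()`/`user_mktime()` pair added to ttime.c above. It is an illustration only: the two functions are not exposed in a public header, so the declarations are written out by hand and the program must be linked together with the object file that defines them.

```c
#include <inttypes.h>
#include <stdio.h>
#include <time.h>

void    deltaToUtcInitOnce();        // caches mktime("1970-01-01 00:00:00") for the local time zone
int64_t user_mktime(struct tm *tm);  // time-zone-independent arithmetic plus the cached offset

int main() {
  deltaToUtcInitOnce();              // must run once before user_mktime() is used

  struct tm tm = {0};
  tm.tm_year = 120;                  // years since 1900 -> 2020
  tm.tm_mon  = 0;                    // January
  tm.tm_mday = 1;
  printf("2020-01-01 00:00:00 -> %" PRId64 " seconds\n", user_mktime(&tm));
  return 0;
}
```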
diff --git a/tests/examples/c/subscribe.c b/tests/examples/c/subscribe.c
index 219fa133e0..0bf93f6f2d 100644
--- a/tests/examples/c/subscribe.c
+++ b/tests/examples/c/subscribe.c
@@ -1,18 +1,3 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
 // sample code for TDengine subscribe/consume API
 // to compile: gcc -o subscribe subscribe.c -ltaos
 
@@ -20,43 +5,239 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <taos.h>  // include TDengine header file
+#include <unistd.h>
 
-int main(int argc, char *argv[])
-{
-  TAOS_SUB  *tsub;
+void print_result(TAOS_RES* res, int blockFetch) {
+  TAOS_ROW    row = NULL;
+  int         num_fields = taos_num_fields(res);
+  TAOS_FIELD* fields = taos_fetch_fields(res);
+  int         nRows = 0;
+
+  if (blockFetch) {
+    nRows = taos_fetch_block(res, &row);
+    for (int i = 0; i < nRows; i++) {
+      char temp[256];
+      taos_print_row(temp, row + i, fields, num_fields);
+      puts(temp);
+    }
+  } else {
+    while ((row = taos_fetch_row(res))) {
+      char temp[256];
+      taos_print_row(temp, row, fields, num_fields);
+      puts(temp);
+      nRows++;
+    }
+  }
+
+  printf("%d rows consumed.\n", nRows);
+}
+
+
+void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
+  print_result(res, *(int*)param);
+}
+
+
+void check_row_count(int line, TAOS_RES* res, int expected) {
+  int actual = 0;
   TAOS_ROW row;
-  char dbname[64], table[64];
-  char temp[256];
+  while ((row = taos_fetch_row(res))) {
+    actual++;
+  }
+  if (actual != expected) {
+    printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual);
+  } else {
+    printf("line %d: %d rows consumed as expected\n", line, actual);
+  }
+}
 
-  if ( argc == 1 ) {
-    printf("usage: %s server-ip db-name table-name \n", argv[0]);
-    exit(0);
-  }
-  if ( argc >= 2 ) strcpy(dbname, argv[2]);
-  if ( argc >= 3 ) strcpy(table, argv[3]);
+void run_test(TAOS* taos) {
+  taos_query(taos, "drop database if exists test;");
+
+  usleep(100000);
+  taos_query(taos, "create database test tables 5;");
+  usleep(100000);
+  taos_query(taos, "use test;");
+  usleep(100000);
+  taos_query(taos, "create table meters(ts timestamp, a int, b binary(20)) tags(loc binary(20), area int);");
 
-  tsub = taos_subscribe(argv[1], "root", "taosdata", dbname, table, 0, 1000);
-  if ( tsub == NULL ) {
-    printf("failed to connet to db:%s\n", dbname);
+  taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:00:00.000', 0, 'china');");
+  taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:01:00.000', 0, 'china');");
+  taos_query(taos, "insert into t0 using meters tags('beijing', 0) values('2020-01-01 00:02:00.000', 0, 'china');");
+  taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:00:00.000', 0, 'china');");
+  taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:01:00.000', 0, 'china');");
+  taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:02:00.000', 0, 'china');");
+  taos_query(taos, "insert into t1 using meters tags('shanghai', 0) values('2020-01-01 00:03:00.000', 0, 'china');");
+  taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:00:00.000', 0, 'UK');");
+  taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:00.000', 0, 'UK');");
+  taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:01.000', 0, 'UK');");
+  taos_query(taos, "insert into t2 using meters tags('london', 0) values('2020-01-01 00:01:02.000', 0, 'UK');");
+  taos_query(taos, "insert into t3 using meters tags('tianjin', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t4 using meters tags('wuhan', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t5 using meters tags('jinan', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t6 using meters tags('haikou', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t7 using meters tags('nanjing', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t8 using meters tags('lanzhou', 0) values('2020-01-01 00:01:02.000', 0, 'china');");
+  taos_query(taos, "insert into t9 using meters tags('tokyo', 0) values('2020-01-01 00:01:02.000', 0, 'japan');");
+
+  // super tables subscription
+
+  TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+  TAOS_RES* res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 18);
+
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 0);
+
+  taos_query(taos, "insert into t0 values('2020-01-01 00:03:00.000', 0, 'china');");
+  taos_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0, 'china');");
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 2);
+
+  taos_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0, 'UK');");
+  taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0, 'UK');");
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 2);
+
+  taos_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0, 'china');");
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 1);
+
+  // keep progress information and restart subscription
+  taos_unsubscribe(tsub, 1);
+  taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0, 'china');");
+  tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0);
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 24);
+
+  // keep progress information and continue previous subscription
+  taos_unsubscribe(tsub, 1);
+  tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 0);
+
+  // don't keep progress information and continue previous subscription
+  taos_unsubscribe(tsub, 0);
+  tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 24);
+
+  // single meter subscription
+
+  taos_unsubscribe(tsub, 0);
+  tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0);
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 5);
+
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 0);
+
+  taos_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0, 'china');");
+  res = taos_consume(tsub);
+  check_row_count(__LINE__, res, 1);
+
+  taos_unsubscribe(tsub, 0);
+}
+
+
+int main(int argc, char *argv[]) {
+  const char* host = "127.0.0.1";
+  const char* user = "root";
+  const char* passwd = "taosdata";
+  const char* sql = "select * from meters;";
+  const char* topic = "test-multiple";
+  int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0;
+
+  for (int i = 1; i < argc; i++) {
+    if (strncmp(argv[i], "-h=", 3) == 0) {
+      host = argv[i] + 3;
+      continue;
+    }
+    if (strncmp(argv[i], "-u=", 3) == 0) {
+      user = argv[i] + 3;
+      continue;
+    }
+    if (strncmp(argv[i], "-p=", 3) == 0) {
+      passwd = argv[i] + 3;
+      continue;
+    }
+    if (strcmp(argv[i], "-sync") == 0) {
+      async = 0;
+      continue;
+    }
+    if (strcmp(argv[i], "-restart") == 0) {
+      restart = 1;
+      continue;
+    }
+    if (strcmp(argv[i], "-single") == 0) {
+      sql = "select * from t0;";
+      topic = "test-single";
+      continue;
+    }
+    if (strcmp(argv[i], "-nokeep") == 0) {
+      keep = 0;
+      continue;
+    }
+    if (strncmp(argv[i], "-sql=", 5) == 0) {
+      sql = argv[i] + 5;
+      topic = "test-custom";
+      continue;
+    }
+    if (strcmp(argv[i], "-test") == 0) {
+      test = 1;
+      continue;
+    }
+    if (strcmp(argv[i], "-block-fetch") == 0) {
+      blockFetch = 1;
+      continue;
+    }
+  }
+
+  // init TAOS
+  taos_init();
+
+  TAOS* taos = taos_connect(host, user, passwd, "test", 0);
+  if (taos == NULL) {
+    printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
     exit(1);
   }
 
-  TAOS_FIELD *fields = taos_fetch_subfields(tsub);
-  int fcount = taos_subfields_count(tsub);
-
-  printf("start to retrieve data\n");
-  printf("please use other taos client, insert rows into %s.%s\n", dbname, table);
-  while ( 1 ) {
-    row = taos_consume(tsub);
-    if ( row == NULL ) break;
-
-    taos_print_row(temp, row, fields, fcount);
-    printf("%s\n", temp);
+  if (test) {
+    run_test(taos);
+    taos_close(taos);
+    exit(0);
   }
 
-  taos_unsubscribe(tsub);
+  TAOS_SUB* tsub = NULL;
+  if (async) {
+    // create an asynchronized subscription, the callback function will be called every 1s
+    tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
+  } else {
+    // create an synchronized subscription, need to call 'taos_consume' manually
+    tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
+  }
+
+  if (tsub == NULL) {
+    printf("failed to create subscription.\n");
+    exit(0);
+  }
+
+  if (async) {
+    getchar();
+  } else while(1) {
+    TAOS_RES* res = taos_consume(tsub);
+    if (res == NULL) {
+      printf("failed to consume data.");
+      break;
+    } else {
+      print_result(res, blockFetch);
+      getchar();
+    }
+  }
+
+  taos_unsubscribe(tsub, keep);
+  taos_close(taos);
 
   return 0;
 }
-
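For reference, a minimal synchronous variant distilled from the example above (not part of the patch): one subscription, default pulling interval, progress discarded on exit, a fixed number of consume rounds instead of an interactive loop. The topic name "test-minimal" and the round count are made up for this sketch; the connection defaults mirror the example.

```c
#include <stdio.h>
#include <taos.h>

int main() {
  taos_init();
  TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (taos == NULL) return 1;

  TAOS_SUB *tsub = taos_subscribe(taos, 0, "test-minimal", "select * from meters;", NULL, NULL, 0);
  if (tsub == NULL) {
    taos_close(taos);
    return 1;
  }

  for (int i = 0; i < 10; i++) {      // consume ten batches, then stop
    TAOS_RES *res = taos_consume(tsub);
    if (res == NULL) break;
    TAOS_ROW    row;
    int         num_fields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    while ((row = taos_fetch_row(res))) {
      char line[256];
      taos_print_row(line, row, fields, num_fields);
      puts(line);
    }
  }

  taos_unsubscribe(tsub, 0);          // 0: do not keep the progress information
  taos_close(taos);
  return 0;
}
```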