diff --git a/docs/en/14-reference/05-connectors/14-java.mdx b/docs/en/14-reference/05-connectors/14-java.mdx index 93b72ca026..8192807c6d 100644 --- a/docs/en/14-reference/05-connectors/14-java.mdx +++ b/docs/en/14-reference/05-connectors/14-java.mdx @@ -69,7 +69,7 @@ REST connection supports all platforms that can run Java. After an error is reported, the error message and error code can be obtained through SQLException. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:jdbc_exception}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:jdbc_exception}} ``` There are four types of error codes that the JDBC client library can report: @@ -150,7 +150,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead. GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../taos-sql/data-type/) For WKB specifications, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/) -For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java) ## Installation Steps @@ -395,7 +395,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Create database and tables ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:create_db_and_table}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:create_db_and_table}} ``` > **Note**: If you do not use `USE power` to specify the database, all subsequent operations on the table need to add the database name as a prefix, such as power.meters. @@ -403,7 +403,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Insert data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:insert_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:insert_data}} ``` > NOW is an internal function. The default is the current time of the client's computer. @@ -412,7 +412,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ### Querying data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:query_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:query_data}} ``` > The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. 
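The included `query_data` snippet is maintained in the example sources and is not visible in this diff. As a minimal sketch of the field-name access recommended in the note above, assuming a REST/WebSocket style URL on port 6041, the default `root`/`taosdata` credentials, and the `power.meters` table used throughout these docs, plain JDBC code along these lines illustrates the idea:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;

public class QueryByFieldNameSketch {
    public static void main(String[] args) throws SQLException {
        // Hypothetical connection settings; adjust host, port, and credentials for your deployment.
        String url = "jdbc:TAOS-RS://localhost:6041/power";
        try (Connection conn = DriverManager.getConnection(url, "root", "taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT ts, current, voltage, phase FROM meters LIMIT 100")) {
            while (rs.next()) {
                // Read columns by name instead of by 1-based index.
                Timestamp ts = rs.getTimestamp("ts");
                float current = rs.getFloat("current");
                int voltage = rs.getInt("voltage");
                float phase = rs.getFloat("phase");
                System.out.printf("ts=%s, current=%.2f, voltage=%d, phase=%.2f%n", ts, current, voltage, phase);
            }
        }
    }
}
```

Column labels follow the SELECT list, so if a column is aliased in the query, use the alias when reading it.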
@@ -422,7 +422,7 @@ For example, if you specify the password as `taosdata` in the URL and specify th ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:with_reqid}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:with_reqid}} ``` ### Writing data via parameter binding @@ -440,20 +440,20 @@ TDengine has significantly improved the bind APIs to support data writing (INSER ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java:para_bind}} ``` -This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingFullDemo.java) +This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java) ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}} ``` -This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingFullDemo.java) +This is the [Detailed Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java) @@ -503,14 +503,14 @@ TDengine supports schemaless writing. It is compatible with InfluxDB's Line Prot ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java:schemaless}} ``` ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java:schemaless}} ``` @@ -531,7 +531,7 @@ The TDengine Java client library supports subscription functionality with the fo #### Create a Topic ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java:create_topic}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java:create_topic}} ``` The preceding example uses the SQL statement `SELECT ts, current, voltage, phase, groupid, location FROM meters` and creates a topic named `topic_meters`. @@ -540,7 +540,7 @@ The preceding example uses the SQL statement `SELECT ts, current, voltage, phase #### Create a Consumer ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java:create_consumer}} ``` - bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used. 
@@ -561,7 +561,7 @@ For more information, see [Consumer Parameters](../../../develop/tmq/#create-a-c #### Subscribe to consume data ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java:poll_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java:poll_data}} ``` The parameters of the subscribe method are defined as: a list of topics to subscribe, and it supports subscribing to multiple topics at the same time. @@ -588,7 +588,7 @@ void seekToEnd(Collection partitions) throws SQLException; Example usage is as follows. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java:consumer_seek}} ``` #### Commit offset @@ -622,14 +622,14 @@ For more information, see [Data Subscription](../../../develop/tmq). In addition to the native connection, the Java client library also supports subscribing via websocket. ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:consumer_demo}} ``` ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_demo}} ``` @@ -644,7 +644,7 @@ In addition to the native connection, the Java client library also supports subs Example usage is as follows. ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/HikariDemo.java:connection_pool}} ``` > getConnection(), you need to call the close() method after you finish using it. It doesn't close the connection. It just puts it back into the connection pool. @@ -655,7 +655,7 @@ Example usage is as follows. Example usage is as follows. ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/DruidDemo.java:connection_pool}} ``` > For more questions about using druid, please see [Official Instructions](https://github.com/alibaba/druid). 
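The body of the HikariCP example included above is not shown in this diff. As a minimal sketch of the pattern the note describes, assuming the HikariCP dependency is on the classpath and using an illustrative native JDBC URL and pool settings, the setup looks roughly like this; `close()` on a pooled connection only returns it to the pool:

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

import java.sql.Connection;
import java.sql.Statement;

public class HikariPoolSketch {
    public static void main(String[] args) throws Exception {
        HikariConfig config = new HikariConfig();
        // Native connection URL; a REST/WebSocket URL (jdbc:TAOS-RS://...) should work the same way.
        config.setJdbcUrl("jdbc:TAOS://localhost:6030/power");
        config.setUsername("root");
        config.setPassword("taosdata");
        config.setMaximumPoolSize(10);        // illustrative pool sizing
        config.setMinimumIdle(2);
        config.setConnectionTimeout(30_000);  // milliseconds

        try (HikariDataSource ds = new HikariDataSource(config)) {
            // getConnection() borrows a connection from the pool.
            try (Connection conn = ds.getConnection();
                 Statement stmt = conn.createStatement()) {
                stmt.execute("SHOW DATABASES");
            } // close() returns the connection to the pool rather than closing it.
        } // closing the data source shuts the whole pool down.
    }
}
```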
diff --git a/docs/en/14-reference/05-connectors/60-rest-api.mdx b/docs/en/14-reference/05-connectors/60-rest-api.mdx index b059e647ed..2c3cd21f41 100644 --- a/docs/en/14-reference/05-connectors/60-rest-api.mdx +++ b/docs/en/14-reference/05-connectors/60-rest-api.mdx @@ -448,6 +448,7 @@ Response body: ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` Response body: @@ -499,6 +500,7 @@ Response body: ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql ``` Response body: diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md index e8e1386197..468b340abf 100644 --- a/docs/en/27-train-faq/01-faq.md +++ b/docs/en/27-train-faq/01-faq.md @@ -69,7 +69,7 @@ This error indicates that the client could not connect to the server. Perform th 11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/). -### 3. How can I resolve the "Unable to resolve FQDN" error? +### 3. How can I resolve the "Unable to resolve FQDN" error? Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows: @@ -164,3 +164,7 @@ For more information, see [taosAdapter](https://docs.tdengine.com/reference/taos OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient. TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers. + +### 14. How can I resolve the "some vnode/qnode/mnode(s) out of service" error? + +This error occurs when the client has not configured, or cannot resolve, the FQDNs of all server nodes. For example, if the cluster has 3 dnodes but the client has configured the FQDN of only 1 of them, this error is reported when the client accesses the other nodes. For details on
FQDN configuration refer to [How can I resolve the "Unable to resolve FQDN" error?](#FQDN) diff --git a/docs/examples/c/connect_example.c b/docs/examples/c/connect_example.c index 53d2d0d59b..ef07c54122 100644 --- a/docs/examples/c/connect_example.c +++ b/docs/examples/c/connect_example.c @@ -12,11 +12,11 @@ int main() { uint16_t port = 6030; // 0 means use the default port TAOS *taos = taos_connect(host, user, passwd, db, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; } - printf("Connected to %s:%hu successfully.\n", host, port); + fprintf(stdout, "Connected to %s:%hu successfully.\n", host, port); /* put your code here for read and write */ diff --git a/docs/examples/c/create_db_demo.c b/docs/examples/c/create_db_demo.c index 45d4a17c5c..7ae41da65c 100644 --- a/docs/examples/c/create_db_demo.c +++ b/docs/examples/c/create_db_demo.c @@ -33,7 +33,7 @@ static int DemoCreateDB() { // connect TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); return -1; @@ -43,14 +43,13 @@ static int DemoCreateDB() { TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); code = taos_errno(result); if (code != 0) { - printf("Failed to create database power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, code, - taos_errstr(result)); + fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; } taos_free_result(result); - printf("Create database power successfully.\n"); + fprintf(stdout, "Create database power successfully.\n"); // create table const char *sql = @@ -59,14 +58,13 @@ static int DemoCreateDB() { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("Failed to create stable power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, - taos_errstr(result)); + fprintf(stderr, "Failed to create stable power.meters, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); taos_close(taos); taos_cleanup(); return -1; } taos_free_result(result); - printf("Create stable power.meters successfully.\n"); + fprintf(stdout, "Create stable power.meters successfully.\n"); // close & clean taos_close(taos); @@ -75,4 +73,6 @@ static int DemoCreateDB() { // ANCHOR_END: create_db_and_table } -int main(int argc, char *argv[]) { return DemoCreateDB(); } +int main(int argc, char *argv[]) { + return DemoCreateDB(); +} diff --git a/docs/examples/c/insert_data_demo.c b/docs/examples/c/insert_data_demo.c index 3d655edca3..7570af02ad 100644 --- a/docs/examples/c/insert_data_demo.c +++ b/docs/examples/c/insert_data_demo.c @@ -23,50 +23,52 @@ #include "taos.h" static int DemoInsertData() { -// ANCHOR: insert_data -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: insert_data + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + 
int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -// insert data, please make sure the database and table are already created -const char* sql = "INSERT INTO " \ - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " \ - "VALUES " \ - "(NOW + 1a, 10.30000, 219, 0.31000) " \ - "(NOW + 2a, 12.60000, 218, 0.33000) " \ - "(NOW + 3a, 12.30000, 221, 0.31000) " \ - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " \ - "VALUES " \ - "(NOW + 1a, 10.30000, 218, 0.25000) "; -TAOS_RES *result = taos_query(taos, sql); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert data to power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + // insert data, please make sure the database and table are already created + const char *sql = + "INSERT INTO " + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + "VALUES " + "(NOW + 1a, 10.30000, 219, 0.31000) " + "(NOW + 2a, 12.60000, 218, 0.33000) " + "(NOW + 3a, 12.30000, 221, 0.31000) " + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + "VALUES " + "(NOW + 1a, 10.30000, 218, 0.25000) "; + TAOS_RES *result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to insert data to power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + + // you can check affectedRows here, before freeing the result + int rows = taos_affected_rows(result); + fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} -taos_free_result(result); - -// you can check affectedRows here -int rows = taos_affected_rows(result); -printf("Successfully inserted %d rows into power.meters.\n", rows); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: insert_data + return 0; + // ANCHOR_END: insert_data } int main(int argc, char *argv[]) { diff --git a/docs/examples/c/query_data_demo.c b/docs/examples/c/query_data_demo.c index e58c467ccf..0e13f57e3f 100644 --- a/docs/examples/c/query_data_demo.c +++ b/docs/examples/c/query_data_demo.c @@ -22,59 +22,56 @@ #include #include "taos.h" - static int DemoQueryData() { -// ANCHOR: query_data -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: query_data + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 
0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } + // query data, please make sure the database and table are already created + const char *sql = "SELECT ts, current, location FROM power.meters limit 100"; + TAOS_RES *result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to query data from power.meters, sql: %s, ErrCode: 0x%x, ErrMessage: %s\n.", sql, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + TAOS_ROW row = NULL; + int rows = 0; + int num_fields = taos_field_count(result); + TAOS_FIELD *fields = taos_fetch_fields(result); -// query data, please make sure the database and table are already created -const char* sql = "SELECT ts, current, location FROM power.meters limit 100"; -TAOS_RES *result = taos_query(taos, sql); -code = taos_errno(result); -if (code != 0) { - printf("Failed to query data from power.meters, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql); + + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + // Add your data processing logic here + + rows++; + } + fprintf(stdout, "total rows: %d\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} - -TAOS_ROW row = NULL; -int rows = 0; -int num_fields = taos_field_count(result); -TAOS_FIELD *fields = taos_fetch_fields(result); - -printf("fields: %d\n", num_fields); -printf("sql: %s, result:\n", sql); - -// fetch the records row by row -while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); - printf("%s\n", temp); -} -printf("total rows: %d\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: query_data + return 0; + // ANCHOR_END: query_data } int main(int argc, char *argv[]) { diff --git a/docs/examples/c/sml_insert_demo.c b/docs/examples/c/sml_insert_demo.c index 807fd10501..63870d90b6 100644 --- a/docs/examples/c/sml_insert_demo.c +++ b/docs/examples/c/sml_insert_demo.c @@ -21,112 +21,119 @@ #include #include "taos.h" - static int DemoSmlInsert() { -// ANCHOR: schemaless -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: schemaless + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), + taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -// create database -TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); -code = taos_errno(result); -if (code != 0) { - printf("Failed to create database power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} -taos_free_result(result); 
-printf("Create database power successfully.\n"); + // create database + TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power"); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to create database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + taos_free_result(result); -// use database -result = taos_query(taos, "USE power"); -code = taos_errno(result); -if (code != 0) { - printf("Failed to execute use power, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} -taos_free_result(result); + // use database + result = taos_query(taos, "USE power"); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to execute use power, ErrCode: 0x%x, ErrMessage: %s\n.", code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + taos_free_result(result); -// schemaless demo data -char * line_demo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"; -char * telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; -char * json_demo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; + // schemaless demo data + char *line_demo = + "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 " + "1626006833639"; + char *telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0"; + char *json_demo = + "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, " + "\"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"; -// influxdb line protocol -char *lines[] = {line_demo}; -result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert schemaless line data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} + // influxdb line protocol + char *lines[] = {line_demo}; + result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to insert schemaless line data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", line_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } -int rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless line data successfully.\n", rows); -taos_free_result(result); + int rows = taos_affected_rows(result); + fprintf(stdout, "Insert %d rows of schemaless line data successfully.\n", rows); + taos_free_result(result); -// opentsdb telnet protocol -char *telnets[] = {telnet_demo}; -result = taos_schemaless_insert(taos, telnets, 1, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); -code = taos_errno(result); -if (code != 0) { - printf("Failed to insert schemaless telnet data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); - taos_close(taos); - taos_cleanup(); - return -1; -} + // opentsdb telnet protocol + char *telnets[] = {telnet_demo}; + result = taos_schemaless_insert(taos, telnets, 1, 
TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to insert schemaless telnet data, data: %s, ErrCode: 0x%x, ErrMessage: %s\n.", telnet_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } -rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless telnet data successfully.\n", rows); -taos_free_result(result); + rows = taos_affected_rows(result); + fprintf(stdout, "Insert %d rows of schemaless telnet data successfully.\n", rows); + taos_free_result(result); -// opentsdb json protocol -char *jsons[1] = {0}; -// allocate memory for json data. can not use static memory. -jsons[0] = malloc(1024); -if (jsons[0] == NULL) { - printf("Failed to allocate memory\n"); - taos_close(taos); - taos_cleanup(); - return -1; -} -(void)strncpy(jsons[0], json_demo, 1023); -result = taos_schemaless_insert(taos, jsons, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); -code = taos_errno(result); -if (code != 0) { + // opentsdb json protocol + char *jsons[1] = {0}; + // allocate memory for json data. can not use static memory. + size_t size = 1024; + jsons[0] = malloc(size); + if (jsons[0] == NULL) { + fprintf(stderr, "Failed to allocate memory: %zu bytes.\n", size); + taos_close(taos); + taos_cleanup(); + return -1; + } + (void)strncpy(jsons[0], json_demo, 1023); + result = taos_schemaless_insert(taos, jsons, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED); + code = taos_errno(result); + if (code != 0) { + free(jsons[0]); + fprintf(stderr, "Failed to insert schemaless json data, Server: %s, ErrCode: 0x%x, ErrMessage: %s\n.", json_demo, code, + taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } free(jsons[0]); - printf("Failed to insert schemaless json data, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", host, port, code, taos_errstr(result)); + + rows = taos_affected_rows(result); + fprintf(stdout, "Insert %d rows of schemaless json data successfully.\n", rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} -free(jsons[0]); - -rows = taos_affected_rows(result); -printf("Insert %d rows of schemaless json data successfully.\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: schemaless + return 0; + // ANCHOR_END: schemaless } int main(int argc, char *argv[]) { diff --git a/docs/examples/c/stmt_insert_demo.c b/docs/examples/c/stmt_insert_demo.c index 854c9f86e2..f89a0fc8da 100644 --- a/docs/examples/c/stmt_insert_demo.c +++ b/docs/examples/c/stmt_insert_demo.c @@ -32,7 +32,7 @@ void executeSQL(TAOS *taos, const char *sql) { TAOS_RES *res = taos_query(taos, sql); int code = taos_errno(res); if (code != 0) { - printf("%s\n", taos_errstr(res)); + fprintf(stderr, "%s\n", taos_errstr(res)); taos_free_result(res); taos_close(taos); exit(EXIT_FAILURE); @@ -49,7 +49,7 @@ void executeSQL(TAOS *taos, const char *sql) { */ void checkErrorCode(TAOS_STMT *stmt, int code, const char *msg) { if (code != 0) { - printf("%s. code: %d, error: %s\n", msg,code,taos_stmt_errstr(stmt)); + fprintf(stderr, "%s. 
code: %d, error: %s\n", msg,code,taos_stmt_errstr(stmt)); taos_stmt_close(stmt); exit(EXIT_FAILURE); } @@ -74,7 +74,7 @@ void insertData(TAOS *taos) { // init TAOS_STMT *stmt = taos_stmt_init(taos); if (stmt == NULL) { - printf("Failed to init taos_stmt, error: %s\n", taos_stmt_errstr(NULL)); + fprintf(stderr, "Failed to init taos_stmt, error: %s\n", taos_stmt_errstr(NULL)); exit(EXIT_FAILURE); } // prepare @@ -159,7 +159,7 @@ void insertData(TAOS *taos) { int affected = taos_stmt_affected_rows_once(stmt); total_affected += affected; } - printf("Successfully inserted %d rows to power.meters.\n", total_affected); + fprintf(stdout, "Successfully inserted %d rows to power.meters.\n", total_affected); taos_stmt_close(stmt); } @@ -170,7 +170,7 @@ int main() { uint16_t port = 6030; TAOS *taos = taos_connect(host, user, password, NULL, port); if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); exit(EXIT_FAILURE); } diff --git a/docs/examples/c/tmq_demo.c b/docs/examples/c/tmq_demo.c index e7a2425c7d..18eef02098 100644 --- a/docs/examples/c/tmq_demo.c +++ b/docs/examples/c/tmq_demo.c @@ -26,6 +26,7 @@ volatile int thread_stop = 0; static int running = 1; +static int count = 0; const char* topic_name = "topic_meters"; typedef struct { @@ -40,13 +41,25 @@ typedef struct { const char* auto_offset_reset; } ConsumerConfig; +ConsumerConfig config = { + .enable_auto_commit = "true", + .auto_commit_interval_ms = "1000", + .group_id = "group1", + .client_id = "client1", + .td_connect_host = "localhost", + .td_connect_port = "6030", + .td_connect_user = "root", + .td_connect_pass = "taosdata", + .auto_offset_reset = "latest" +}; + void* prepare_data(void* arg) { - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); + const char* host = "localhost"; + const char* user = "root"; + const char* password = "taosdata"; + uint16_t port = 6030; + int code = 0; + TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); @@ -78,62 +91,46 @@ void* prepare_data(void* arg) { } // ANCHOR: msg_process -static int32_t msg_process(TAOS_RES* msg) { - char buf[1024]; // buf to store the row content +int32_t msg_process(TAOS_RES* msg) { int32_t rows = 0; const char* topicName = tmq_get_topic_name(msg); - const char* dbName = tmq_get_db_name(msg); - int32_t vgroupId = tmq_get_vgroup_id(msg); + const char* dbName = tmq_get_db_name(msg); + int32_t vgroupId = tmq_get_vgroup_id(msg); - fprintf(stdout, "topic: %s\n", topicName); - fprintf(stdout, "db: %s\n", dbName); - fprintf(stdout, "vgroup id: %d\n", vgroupId); - - while (1) { + while (true) { // get one row data from message TAOS_ROW row = taos_fetch_row(msg); if (row == NULL) break; - // get the field information - TAOS_FIELD* fields = taos_fetch_fields(msg); - // get the number of fields - int32_t numOfFields = taos_field_count(msg); - // get the precision of the result - int32_t precision = taos_result_precision(msg); + // Add your data processing logic here + rows++; - // print the row content - if (taos_print_row(buf, 
row, fields, numOfFields) < 0) { - fprintf(stderr, "Failed to print row\n"); - break; - } - // print the precision and row content to the console - fprintf(stdout, "precision: %d, data: %s\n", precision, buf); } return rows; } // ANCHOR_END: msg_process -static int32_t init_env() { - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); +TAOS* init_env() { + const char* host = "localhost"; + const char* user = "root"; + const char* password = "taosdata"; + uint16_t port = 6030; + int code = 0; + TAOS* pConn = taos_connect(host, user, password, NULL, port); if (pConn == NULL) { fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); taos_cleanup(); - return -1; + return NULL; } TAOS_RES* pRes; // drop database if exists - fprintf(stdout, "Create database.\n"); pRes = taos_query(pConn, "DROP TOPIC IF EXISTS topic_meters"); code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to drop topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + goto END; } taos_free_result(pRes); @@ -141,6 +138,7 @@ static int32_t init_env() { code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to drop database power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + goto END; } taos_free_result(pRes); @@ -148,13 +146,12 @@ static int32_t init_env() { pRes = taos_query(pConn, "CREATE DATABASE power PRECISION 'ms' WAL_RETENTION_PERIOD 3600"); code = taos_errno(pRes); if (code != 0) { - fprintf(stderr, "Failed to create tmqdb, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + fprintf(stderr, "Failed to create power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); goto END; } taos_free_result(pRes); // create super table - fprintf(stdout, "Create super table.\n"); pRes = taos_query( pConn, "CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS " @@ -164,56 +161,78 @@ static int32_t init_env() { fprintf(stderr, "Failed to create super table meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); goto END; } - taos_free_result(pRes); - taos_close(pConn); - return 0; + + return pConn; END: taos_free_result(pRes); taos_close(pConn); - return -1; + return NULL; } -int32_t create_topic() { - fprintf(stdout, "Create topic.\n"); - TAOS_RES* pRes; - const char *host = "localhost"; - const char *user = "root"; - const char *password = "taosdata"; - uint16_t port = 6030; - int code = 0; - TAOS *pConn = taos_connect(host, user, password, NULL, port); - if (pConn == NULL) { - fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); +void deinit_env(TAOS* pConn) { + if (pConn) + taos_close(pConn); +} + +int32_t create_topic(TAOS* pConn) { + TAOS_RES* pRes; + int code = 0; + + if (!pConn) { + fprintf(stderr, "Invalid input parameter.\n"); return -1; } - pRes = taos_query(pConn, "USE POWER"); + pRes = taos_query(pConn, "USE power"); code = taos_errno(pRes); if (taos_errno(pRes) != 0) { - fprintf(stderr, "Failed to use tmqdb, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); return -1; } taos_free_result(pRes); - pRes = taos_query( - pConn, - "CREATE TOPIC IF NOT EXISTS 
topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); + pRes = taos_query(pConn, "CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); code = taos_errno(pRes); if (code != 0) { fprintf(stderr, "Failed to create topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); return -1; } taos_free_result(pRes); + return 0; +} - taos_close(pConn); +int32_t drop_topic(TAOS* pConn) { + TAOS_RES* pRes; + int code = 0; + + if (!pConn) { + fprintf(stderr, "Invalid input parameter.\n"); + return -1; + } + + pRes = taos_query(pConn, "USE power"); + code = taos_errno(pRes); + if (taos_errno(pRes) != 0) { + fprintf(stderr, "Failed to use power, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "DROP TOPIC IF EXISTS topic_meters"); + code = taos_errno(pRes); + if (code != 0) { + fprintf(stderr, "Failed to drop topic topic_meters, ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); return 0; } void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { - fprintf(stdout, "tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param); + count +=1; + fprintf(stdout, "tmq_commit_cb_print() code: %d, tmq: %p, param: %p, count: %d.\n", code, tmq, param, count); } // ANCHOR: create_consumer_1 @@ -288,14 +307,14 @@ _end: tmq_list_t* build_topic_list() { // create a empty topic list tmq_list_t* topicList = tmq_list_new(); - const char* topic_name = "topic_meters"; // append topic name to the list int32_t code = tmq_list_append(topicList, topic_name); if (code) { // if failed, destroy the list and return NULL tmq_list_destroy(topicList); - fprintf(stderr, "Failed to create topic_list, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to create topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); return NULL; } // if success, return the list @@ -314,8 +333,10 @@ void basic_consume_loop(tmq_t* tmq) { TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout); if (tmqmsg) { msgCnt++; - // process the message + + // Add your data processing logic here totalRows += msg_process(tmqmsg); + // free the message taos_free_result(tmqmsg); } @@ -326,7 +347,7 @@ void basic_consume_loop(tmq_t* tmq) { } // print the result: total messages and total rows consumed - fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows); + fprintf(stdout, "%d msg consumed, include %d rows\n", msgCnt, totalRows); } // ANCHOR_END: basic_consume_loop @@ -338,7 +359,8 @@ void consume_repeatly(tmq_t* tmq) { // get the topic assignment int32_t code = tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment); if (code != 0 || pAssign == NULL || numOfAssignment == 0) { - fprintf(stderr, "Failed to get assignment, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); return; } @@ -348,11 +370,13 @@ void consume_repeatly(tmq_t* tmq) { code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin); if (code != 0) { - fprintf(stderr, "Failed to seek assignment %d to beginning %ld, ErrCode: 0x%x, ErrMessage: %s.\n", i, p->begin, code, 
tmq_err2str(code)); - } else { - fprintf(stdout, "Seek assignment %d to beginning %ld successfully.\n", i, p->begin); + fprintf(stderr, "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, vgId: %d, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, p->vgId, code, tmq_err2str(code)); + break; } } + if (code == 0) + fprintf(stdout, "Assignment seek to beginning successfully.\n"); // free the assignment array tmq_free_assignment(pAssign); @@ -377,9 +401,9 @@ void manual_commit(tmq_t* tmq) { totalRows += msg_process(tmqmsg); // commit the message int32_t code = tmq_commit_sync(tmq, tmqmsg); - if (code) { - fprintf(stderr, "Failed to commit message, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); // free the message taos_free_result(tmqmsg); break; @@ -396,7 +420,7 @@ void manual_commit(tmq_t* tmq) { } // print the result: total messages and total rows consumed - fprintf(stderr, "%d msg consumed, include %d rows.\n", msgCnt, totalRows); + fprintf(stdout, "%d msg consumed, include %d rows.\n", msgCnt, totalRows); } // ANCHOR_END: manual_commit @@ -404,11 +428,14 @@ int main(int argc, char* argv[]) { int32_t code; pthread_t thread_id; - if (init_env() < 0) { + TAOS* pConn = init_env(); + if (pConn == NULL) { + fprintf(stderr, "Failed to init env.\n"); return -1; } - if (create_topic() < 0) { + if (create_topic(pConn) < 0) { + fprintf(stderr, "Failed to create topic.\n"); return -1; } @@ -417,25 +444,15 @@ int main(int argc, char* argv[]) { return -1; } - ConsumerConfig config = { - .enable_auto_commit = "true", - .auto_commit_interval_ms = "1000", - .group_id = "group1", - .client_id = "client1", - .td_connect_host = "localhost", - .td_connect_port = "6030", - .td_connect_user = "root", - .td_connect_pass = "taosdata", - .auto_offset_reset = "latest" - }; - // ANCHOR: create_consumer_2 tmq_t* tmq = build_consumer(&config); if (NULL == tmq) { - fprintf(stderr, "Failed to create native consumer, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, config.group_id, config.client_id); + fprintf(stderr, "Failed to create native consumer, host: %s, groupId: %s, , clientId: %s.\n", + config.td_connect_host, config.group_id, config.client_id); return -1; } else { - fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, , clientId: %s.\n", config.td_connect_host, config.group_id, config.client_id); + fprintf(stdout, "Create consumer successfully, host: %s, groupId: %s, clientId: %s.\n", + config.td_connect_host, config.group_id, config.client_id); } // ANCHOR_END: create_consumer_2 @@ -443,12 +460,14 @@ int main(int argc, char* argv[]) { // ANCHOR: subscribe_3 tmq_list_t* topic_list = build_topic_list(); if (NULL == topic_list) { - fprintf(stderr, "Failed to create topic_list.\n"); + fprintf(stderr, "Failed to create topic_list, topic: %s, groupId: %s, clientId: %s.\n", + topic_name, config.group_id, config.client_id); return -1; } if ((code = tmq_subscribe(tmq, topic_list))) { - fprintf(stderr, "Failed to subscribe topic_list, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to subscribe topic_list, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { fprintf(stdout, "Subscribe topics 
successfully.\n"); } @@ -466,22 +485,30 @@ int main(int argc, char* argv[]) { // unsubscribe the topic code = tmq_unsubscribe(tmq); if (code) { - fprintf(stderr, "Failed to unsubscribe consumer, ErrCode: 0x%x, ErrMessage: %s.\n", code, tmq_err2str(code)); + fprintf(stderr, "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { - fprintf(stderr, "Consumer unsubscribed successfully.\n"); + fprintf(stdout, "Consumer unsubscribed successfully.\n"); } // close the consumer code = tmq_consumer_close(tmq); if (code) { - fprintf(stderr, "Failed to close consumer: %s.\n", tmq_err2str(code)); + fprintf(stderr, "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrCode: 0x%x, ErrMessage: %s.\n", + topic_name, config.group_id, config.client_id, code, tmq_err2str(code)); } else { - fprintf(stderr, "Consumer closed successfully.\n"); + fprintf(stdout, "Consumer closed successfully.\n"); } // ANCHOR_END: unsubscribe_and_close thread_stop = 1; pthread_join(thread_id, NULL); + if (drop_topic(pConn) < 0) { + fprintf(stderr, "Failed to drop topic.\n"); + return -1; + } + + deinit_env(pConn); return 0; } diff --git a/docs/examples/c/with_reqid_demo.c b/docs/examples/c/with_reqid_demo.c index 724b682850..1a1a53acc6 100644 --- a/docs/examples/c/with_reqid_demo.c +++ b/docs/examples/c/with_reqid_demo.c @@ -23,59 +23,56 @@ #include "taos.h" static int DemoWithReqId() { -// ANCHOR: with_reqid -const char *host = "localhost"; -const char *user = "root"; -const char *password = "taosdata"; -uint16_t port = 6030; -int code = 0; + // ANCHOR: with_reqid + const char *host = "localhost"; + const char *user = "root"; + const char *password = "taosdata"; + uint16_t port = 6030; + int code = 0; -// connect -TAOS *taos = taos_connect(host, user, password, NULL, port); -if (taos == NULL) { - printf("Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); - taos_cleanup(); - return -1; -} + // connect + TAOS *taos = taos_connect(host, user, password, NULL, port); + if (taos == NULL) { + fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL), taos_errstr(NULL)); + taos_cleanup(); + return -1; + } -const char *sql = "SELECT ts, current, location FROM power.meters limit 1"; -// query data with reqid -long reqid = 3L; -TAOS_RES *result = taos_query_with_reqid(taos, sql, reqid); -code = taos_errno(result); -if (code != 0) { - printf("Failed to execute sql with reqId: %ld, Server: %s:%hu, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, host, port, code, taos_errstr(result)); + const char *sql = "SELECT ts, current, location FROM power.meters limit 1"; + // query data with reqid + long reqid = 3L; + TAOS_RES *result = taos_query_with_reqid(taos, sql, reqid); + code = taos_errno(result); + if (code != 0) { + fprintf(stderr, "Failed to execute sql with reqId: %ld, ErrCode: 0x%x, ErrMessage: %s\n.", reqid, code, taos_errstr(result)); + taos_close(taos); + taos_cleanup(); + return -1; + } + + TAOS_ROW row = NULL; + int rows = 0; + int num_fields = taos_field_count(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + fprintf(stdout, "query successfully, got %d fields, the sql is: %s.\n", num_fields, sql); + + // fetch the records row by row + while ((row = taos_fetch_row(result))) { + // Add your data processing logic here + + rows++; + } + fprintf(stdout, "total rows: %d\n", 
rows); + taos_free_result(result); + + // close & clean taos_close(taos); taos_cleanup(); - return -1; -} - -TAOS_ROW row = NULL; -int rows = 0; -int num_fields = taos_field_count(result); -TAOS_FIELD *fields = taos_fetch_fields(result); - -printf("fields: %d\n", num_fields); -printf("sql: %s, result:\n", sql); - -// fetch the records row by row -while ((row = taos_fetch_row(result))) { - char temp[1024] = {0}; - rows++; - taos_print_row(temp, row, fields, num_fields); - printf("%s\n", temp); -} -printf("total rows: %d\n", rows); -taos_free_result(result); - -// close & clean -taos_close(taos); -taos_cleanup(); -return 0; -// ANCHOR_END: with_reqid + return 0; + // ANCHOR_END: with_reqid } int main(int argc, char *argv[]) { return DemoWithReqId(); } - diff --git a/docs/examples/csharp/nativesml/Program.cs b/docs/examples/csharp/nativesml/Program.cs index cfee07eee0..047222c4f4 100644 --- a/docs/examples/csharp/nativesml/Program.cs +++ b/docs/examples/csharp/nativesml/Program.cs @@ -42,16 +42,16 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data with schemaless; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data with schemaless, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data with schemaless; Err:" + e.Message); + Console.WriteLine("Failed to insert data with schemaless, ErrMessage:" + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/stmtInsert/Program.cs b/docs/examples/csharp/stmtInsert/Program.cs index 60d1a58e55..b317da2fc1 100644 --- a/docs/examples/csharp/stmtInsert/Program.cs +++ b/docs/examples/csharp/stmtInsert/Program.cs @@ -62,16 +62,16 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert to table meters using stmt, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert to table meters using stmt, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/subscribe/Program.cs b/docs/examples/csharp/subscribe/Program.cs index fa61435b03..50988d0c5d 100644 --- a/docs/examples/csharp/subscribe/Program.cs +++ b/docs/examples/csharp/subscribe/Program.cs @@ -6,6 +6,11 @@ namespace TMQExample { internal class SubscribeDemo { + private static string _host = ""; + private static string _groupId = ""; + private static string _clientId = ""; + private static string _topic = ""; + public static void Main(string[] args) { try @@ -64,9 +69,9 @@ namespace TMQExample { // ANCHOR: create_consumer // consumer config - var host = "127.0.0.1"; - var groupId = "group1"; - var clientId = "client1"; + _host = "127.0.0.1"; + _groupId = "group1"; + _clientId = "client1"; var cfg = new Dictionary() { { "td.connect.port", "6030" }, @@ -74,9 +79,9 @@ namespace TMQExample { "msg.with.table.name", "true" }, { "enable.auto.commit", "true" }, { "auto.commit.interval.ms", "1000" }, - { "group.id", groupId }, - { "client.id", clientId }, - { 
"td.connect.ip", host }, + { "group.id", _groupId }, + { "client.id", _clientId }, + { "td.connect.ip", _host }, { "td.connect.user", "root" }, { "td.connect.pass", "taosdata" }, }; @@ -85,20 +90,33 @@ namespace TMQExample { // create consumer consumer = new ConsumerBuilder>(cfg).Build(); - Console.WriteLine("Create consumer successfully, host: " + host + ", groupId: " + groupId + - ", clientId: " + clientId); + Console.WriteLine( + $"Create consumer successfully, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create native consumer, host : " + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create native consumer, host : " + host + "; ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to create native consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -109,11 +127,12 @@ namespace TMQExample static void Consume(IConsumer> consumer) { // ANCHOR: subscribe + _topic = "topic_meters"; try { // subscribe - consumer.Subscribe(new List() { "topic_meters" }); - Console.WriteLine("subscribe topics successfully"); + consumer.Subscribe(new List() { _topic }); + Console.WriteLine("Subscribe topics successfully"); for (int i = 0; i < 50; i++) { // consume message with using block to ensure the result is disposed @@ -133,13 +152,24 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data; ErrMessage:" + e.Message); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: subscribe @@ -152,24 +182,38 @@ namespace TMQExample { // get assignment var assignment = consumer.Assignment; - Console.WriteLine($"now assignment: {assignment}"); + Console.WriteLine($"Now assignment: {assignment}"); // seek to the beginning foreach (var topicPartition in assignment) { consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0)); } + Console.WriteLine("Assignment seek to beginning successfully"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Seek example failed; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Seek example failed; ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: 
{e.Message}"); throw; } // ANCHOR_END: seek @@ -180,6 +224,7 @@ namespace TMQExample // ANCHOR: commit_offset for (int i = 0; i < 5; i++) { + TopicPartitionOffset topicPartitionOffset = null; try { // consume message with using block to ensure the result is disposed @@ -187,9 +232,10 @@ namespace TMQExample { if (cr == null) continue; // commit offset + topicPartitionOffset = cr.TopicPartitionOffset; consumer.Commit(new List { - cr.TopicPartitionOffset, + topicPartitionOffset, }); Console.WriteLine("Commit offset manually successfully."); } @@ -197,13 +243,26 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute consumer functions. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to commit offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute consumer functions. ErrMessage:" + e.Message); + Console.WriteLine( + $"Failed to commit offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrMessage: {e.Message}"); throw; } } @@ -221,13 +280,24 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to unsubscribe consumer, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer. 
Err: " + e.Message); + Console.WriteLine( + $"Failed to execute commit example, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } finally diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs index cdddf27f42..9bc47d97f1 100644 --- a/docs/examples/csharp/wsInsert/Program.cs +++ b/docs/examples/csharp/wsInsert/Program.cs @@ -16,10 +16,10 @@ namespace Examples var builder = new ConnectionStringBuilder(connectionString); using (var client = DbDriver.Open(builder)) { - CreateDatabaseAndTable(client,connectionString); - InsertData(client,connectionString); - QueryData(client,connectionString); - QueryWithReqId(client,connectionString); + CreateDatabaseAndTable(client, connectionString); + InsertData(client, connectionString); + QueryData(client, connectionString); + QueryWithReqId(client, connectionString); } } catch (TDengineError e) @@ -52,62 +52,67 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create db and table,url:" + connectionString +"; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to create database power or stable meters, ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create db and table, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to create database power or stable meters, ErrMessage: " + e.Message); throw; } // ANCHOR_END: create_db_and_table } - private static void InsertData(ITDengineClient client,string connectionString) + private static void InsertData(ITDengineClient client, string connectionString) { // ANCHOR: insert_data + // insert data, please make sure the database and table are created before + var insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; try { - // insert data, please make sure the database and table are created before - var insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; var affectedRows = client.Exec(insertQuery); Console.WriteLine("Successfully inserted " + affectedRows + " rows to power.meters."); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data to power.meters, url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data to power.meters, sql: " + insertQuery + ", ErrCode: " + + e.Code + ", ErrMessage: " + + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data to power.meters, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert data to power.meters, sql: " + insertQuery + ", ErrMessage: " + + e.Message); throw; } // ANCHOR_END: insert_data } - private static void 
QueryData(ITDengineClient client,string connectionString) + private static void QueryData(ITDengineClient client, string connectionString) { // ANCHOR: select_data + // query data, make sure the database and table are created before + var query = "SELECT ts, current, location FROM power.meters limit 100"; try { - // query data, make sure the database and table are created before - var query = "SELECT ts, current, location FROM power.meters limit 100"; using (var rows = client.Query(query)) { while (rows.Read()) { + // Add your data processing logic here var ts = (DateTime)rows.GetValue(0); var current = (float)rows.GetValue(1); var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2)); @@ -119,28 +124,30 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to query data from power.meters, url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to query data from power.meters, sql: " + query + ", ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to query data from power.meters, url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine( + "Failed to query data from power.meters, sql: " + query + ", ErrMessage: " + e.Message); throw; } // ANCHOR_END: select_data } - private static void QueryWithReqId(ITDengineClient client,string connectionString) + private static void QueryWithReqId(ITDengineClient client, string connectionString) { // ANCHOR: query_id var reqId = (long)3; + // query data + var query = "SELECT ts, current, location FROM power.meters limit 1"; try { - // query data - var query = "SELECT ts, current, location FROM power.meters limit 1"; // query with request id 3 - using (var rows = client.Query(query,reqId)) + using (var rows = client.Query(query, reqId)) { while (rows.Read()) { @@ -155,13 +162,15 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", url:" + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", sql: " + query + ", ErrCode: " + + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", url:" + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to execute sql with reqId: " + reqId + ", sql: " + query + ", ErrMessage: " + + e.Message); throw; } // ANCHOR_END: query_id diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs index 394165f109..fa0365f418 100644 --- a/docs/examples/csharp/wsStmt/Program.cs +++ b/docs/examples/csharp/wsStmt/Program.cs @@ -63,16 +63,16 @@ namespace Examples catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert to table meters using stmt, ErrCode: " + e.Code + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert to table meters using stmt, url: " + connectionString + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert to table meters using stmt, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No 
newline at end of file +} diff --git a/docs/examples/csharp/wssml/Program.cs b/docs/examples/csharp/wssml/Program.cs index 94ee86919b..3bb5eea70c 100644 --- a/docs/examples/csharp/wssml/Program.cs +++ b/docs/examples/csharp/wssml/Program.cs @@ -44,17 +44,17 @@ namespace TDengineExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine("Failed to insert data with schemaless, ErrCode: " + e.Code + + ", ErrMessage: " + e.Error); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + e.Message); + Console.WriteLine("Failed to insert data with schemaless, ErrMessage: " + e.Message); throw; } } // ANCHOR_END: main } -} \ No newline at end of file +} diff --git a/docs/examples/csharp/wssubscribe/Program.cs b/docs/examples/csharp/wssubscribe/Program.cs index 9974c87906..939189cabd 100644 --- a/docs/examples/csharp/wssubscribe/Program.cs +++ b/docs/examples/csharp/wssubscribe/Program.cs @@ -6,6 +6,11 @@ namespace TMQExample { internal class SubscribeDemo { + private static string _host = ""; + private static string _groupId = ""; + private static string _clientId = ""; + private static string _topic = ""; + public static void Main(string[] args) { try @@ -68,9 +73,9 @@ namespace TMQExample { // ANCHOR: create_consumer // consumer config - var host = "127.0.0.1"; - var groupId = "group1"; - var clientId = "client1"; + _host = "127.0.0.1"; + _groupId = "group1"; + _clientId = "client1"; var cfg = new Dictionary() { { "td.connect.type", "WebSocket" }, @@ -79,9 +84,9 @@ namespace TMQExample { "msg.with.table.name", "true" }, { "enable.auto.commit", "true" }, { "auto.commit.interval.ms", "1000" }, - { "group.id", groupId }, - { "client.id", clientId }, - { "td.connect.ip", host }, + { "group.id", _groupId }, + { "client.id", _clientId }, + { "td.connect.ip", _host }, { "td.connect.user", "root" }, { "td.connect.pass", "taosdata" }, }; @@ -90,20 +95,33 @@ namespace TMQExample { // create consumer consumer = new ConsumerBuilder>(cfg).Build(); - Console.WriteLine("Create consumer successfully, host: " + host + ", groupId: " + groupId + - ", clientId: " + clientId); + Console.WriteLine( + $"Create consumer successfully, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to create websocket consumer, host : " + host + "; ErrCode:" + e.Code + - "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to create websocket consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to create websocket consumer, host : " + host + "; ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to create websocket consumer, " + + $"host: {_host}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } @@ -114,10 +132,11 @@ namespace TMQExample static void Consume(IConsumer> consumer) { // ANCHOR: subscribe + _topic = "topic_meters"; try { // subscribe - consumer.Subscribe(new List() { "topic_meters" }); + consumer.Subscribe(new List() { _topic }); Console.WriteLine("Subscribe topics successfully"); for (int i = 0; i < 50; i++) { @@
-138,13 +157,23 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to poll data; ErrMessage:" + e.Message); + Console.WriteLine($"Failed to poll data, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: subscribe @@ -163,18 +192,32 @@ namespace TMQExample { consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0)); } + Console.WriteLine("Assignment seek to beginning successfully"); } catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Seek example failed; ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Seek example failed; ErrMessage: " + e.Message); + Console.WriteLine( + $"Failed to seek offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: 0, " + + $"ErrMessage: {e.Message}"); throw; } // ANCHOR_END: seek @@ -185,6 +228,7 @@ namespace TMQExample // ANCHOR: commit_offset for (int i = 0; i < 5; i++) { + TopicPartitionOffset topicPartitionOffset = null; try { // consume message with using block to ensure the result is disposed @@ -192,9 +236,10 @@ namespace TMQExample { if (cr == null) continue; // commit offset + topicPartitionOffset = cr.TopicPartitionOffset; consumer.Commit(new List { - cr.TopicPartitionOffset, + topicPartitionOffset, }); Console.WriteLine("Commit offset manually successfully."); } @@ -202,13 +247,26 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to execute consumer functions. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to commit offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to execute consumer functions. ErrMessage:" + e.Message); + Console.WriteLine( + $"Failed to commit offset, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"offset: {topicPartitionOffset}, " + + $"ErrMessage: {e.Message}"); throw; } } @@ -226,13 +284,24 @@ namespace TMQExample catch (TDengineError e) { // handle TDengine error - Console.WriteLine("Failed to unsubscribe consumer. ErrCode:" + e.Code + "; ErrMessage: " + e.Error); + Console.WriteLine( + $"Failed to unsubscribe consumer, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrCode: {e.Code}, " + + $"ErrMessage: {e.Error}"); throw; } catch (Exception e) { // handle other exceptions - Console.WriteLine("Failed to unsubscribe consumer. 
Err: " + e.Message); + Console.WriteLine( + $"Failed to unsubscribe consumer, " + + $"topic: {_topic}, " + + $"groupId: {_groupId}, " + + $"clientId: {_clientId}, " + + $"ErrMessage: {e.Message}"); throw; } finally diff --git a/docs/examples/go/queryreqid/main.go b/docs/examples/go/queryreqid/main.go index 045f0f8901..0763feceff 100644 --- a/docs/examples/go/queryreqid/main.go +++ b/docs/examples/go/queryreqid/main.go @@ -1,58 +1,60 @@ -package main - -import ( - "context" - "database/sql" - "fmt" - "log" - "time" - - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -func main() { - taosDSN := "root:taosdata@tcp(localhost:6030)/" - db, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer db.Close() - initEnv(db) - // ANCHOR: query_id - // use context to set request id - reqId := int64(3) - ctx := context.WithValue(context.Background(), "taos_req_id", reqId) - // execute query with context - rows, err := db.QueryContext(ctx, "SELECT ts, current, location FROM power.meters limit 1") - if err != nil { - log.Fatalf("Failed to execute sql with reqId: %d, url: %s; ErrMessage: %s\n", reqId, taosDSN, err.Error()) - } - for rows.Next() { - var ( - ts time.Time - current float32 - location string - ) - err = rows.Scan(&ts, &current, &location) - if err != nil { - log.Fatal("Scan error: ", err) - } - fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) - } - // ANCHOR_END: query_id -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Create database error: ", err) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Create table error: ", err) - } - _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Insert data error: ", err) - } -} +package main + +import ( + "context" + "database/sql" + "fmt" + "log" + "time" + + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +func main() { + taosDSN := "root:taosdata@tcp(localhost:6030)/" + db, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer db.Close() + initEnv(db) + // ANCHOR: query_id + // use context to set request id + reqId := int64(3) + ctx := context.WithValue(context.Background(), "taos_req_id", reqId) + // execute query with context + querySql := "SELECT ts, current, location FROM power.meters limit 1" + rows, err := db.QueryContext(ctx, querySql) + if err != nil { + log.Fatalf("Failed to execute sql with reqId: %d, url: %s, sql: %s, ErrMessage: %s\n", reqId, taosDSN, querySql, err.Error()) + } + for rows.Next() { + // Add your data processing logic here + var ( + ts time.Time + current float32 + location string + ) + err = rows.Scan(&ts, &current, &location) + if err != nil { + log.Fatalf("Failed to scan data, reqId: %d, url:%s, sql: %s, ErrMessage: %s\n", reqId, taosDSN, querySql, err) + } + fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) + } + // ANCHOR_END: query_id +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Create database power error: ", err) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts
TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Create stable meters error: ", err) + } + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Insert data to power.meters error: ", err) + } +} diff --git a/docs/examples/go/schemaless/native/main.go b/docs/examples/go/schemaless/native/main.go index 6caa8bb4cb..36cdee70ac 100644 --- a/docs/examples/go/schemaless/native/main.go +++ b/docs/examples/go/schemaless/native/main.go @@ -20,26 +20,26 @@ func main() { defer conn.Close() _, err = conn.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = conn.Exec("USE power") if err != nil { - log.Fatalln("Failed to use db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to use database power, ErrMessage: " + err.Error()) } // insert influxdb line protocol err = conn.InfluxDBInsertLines([]string{lineDemo}, "ms") if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + lineDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb telnet protocol err = conn.OpenTSDBInsertTelnetLines([]string{telnetDemo}) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + telnetDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb json protocol err = conn.OpenTSDBInsertJsonPayload(jsonDemo) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + jsonDemo + ", ErrMessage: " + err.Error()) } fmt.Println("Inserted data with schemaless successfully.") } diff --git a/docs/examples/go/schemaless/ws/main.go b/docs/examples/go/schemaless/ws/main.go index 8507a70811..df1a14ee09 100644 --- a/docs/examples/go/schemaless/ws/main.go +++ b/docs/examples/go/schemaless/ws/main.go @@ -25,7 +25,7 @@ func main() { defer db.Close() _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041", 1, schemaless.SetDb("power"), @@ -40,17 +40,17 @@ func main() { // insert influxdb line protocol err = s.Insert(lineDemo, schemaless.InfluxDBLineProtocol, "ms", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data:" + lineDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb telnet line protocol err = s.Insert(telnetDemo, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data: " + telnetDemo + ", ErrMessage: " + err.Error()) } // insert opentsdb json format protocol err = s.Insert(jsonDemo, 
schemaless.OpenTSDBJsonFormatProtocol, "s", 0, common.GetReqID()) if err != nil { - log.Fatalln("Failed to insert data with schemaless, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to insert data with schemaless, data: " + jsonDemo + ", ErrMessage: " + err.Error()) } fmt.Println("Inserted data with schemaless successfully.") } diff --git a/docs/examples/go/sqlquery/main.go b/docs/examples/go/sqlquery/main.go index 1301c79325..1bfb74ca87 100644 --- a/docs/examples/go/sqlquery/main.go +++ b/docs/examples/go/sqlquery/main.go @@ -1,85 +1,86 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -func main() { - var taosDSN = "root:taosdata@tcp(localhost:6030)/" - db, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer db.Close() - // ANCHOR: create_db_and_table - // create database - res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatalln("Failed to create db, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - log.Fatalln("Failed to get create db rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - // you can check rowsAffected here - fmt.Println("Create database power successfully, rowsAffected: ", rowsAffected) - // create table - res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatalln("Failed to create db and table, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - rowsAffected, err = res.RowsAffected() - if err != nil { - log.Fatalln("Failed to get create db rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - // you can check rowsAffected here - fmt.Println("Create stable power.meters successfully, rowsAffected:", rowsAffected) - // ANCHOR_END: create_db_and_table - // ANCHOR: insert_data - // insert data, please make sure the database and table are created before - insertQuery := "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) " - res, err = db.Exec(insertQuery) - if err != nil { - log.Fatal("Failed to insert data to power.meters, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - rowsAffected, err = res.RowsAffected() - if err != nil { - log.Fatal("Failed to get insert rowsAffected, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - // you can check affectedRows here - fmt.Printf("Successfully inserted %d rows to power.meters.\n", rowsAffected) - // ANCHOR_END: insert_data - // ANCHOR: select_data - // query data, make sure the database and table are created before - rows, err := db.Query("SELECT ts, current, location FROM power.meters limit 100") - if err != nil { - log.Fatal("Failed to query data from power.meters, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - for rows.Next() { - var ( - ts time.Time - current float32 - location string - ) - err = rows.Scan(&ts, &current, &location) - if err != nil { - log.Fatal("Failed to scan data, url:" + taosDSN + "; ErrMessage: " + err.Error()) - } - // you can check data
here - fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) - } - // ANCHOR_END: select_data -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +func main() { + var taosDSN = "root:taosdata@tcp(localhost:6030)/" + db, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer db.Close() + // ANCHOR: create_db_and_table + // create database + res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) + } + rowsAffected, err := res.RowsAffected() + if err != nil { + log.Fatalln("Failed to get create database rowsAffected, ErrMessage: " + err.Error()) + } + // you can check rowsAffected here + fmt.Println("Create database power successfully, rowsAffected: ", rowsAffected) + // create table + res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) + } + rowsAffected, err = res.RowsAffected() + if err != nil { + log.Fatalln("Failed to get create stable rowsAffected, ErrMessage: " + err.Error()) + } + // you can check rowsAffected here + fmt.Println("Create stable power.meters successfully, rowsAffected:", rowsAffected) + // ANCHOR_END: create_db_and_table + // ANCHOR: insert_data + // insert data, please make sure the database and table are created before + insertQuery := "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) " + res, err = db.Exec(insertQuery) + if err != nil { + log.Fatalf("Failed to insert data to power.meters, sql: %s, ErrMessage: %s\n", insertQuery, err.Error()) + } + rowsAffected, err = res.RowsAffected() + if err != nil { + log.Fatalf("Failed to get insert rowsAffected, sql: %s, ErrMessage: %s\n", insertQuery, err.Error()) + } + // you can check affectedRows here + fmt.Printf("Successfully inserted %d rows to power.meters.\n", rowsAffected) + // ANCHOR_END: insert_data + // ANCHOR: select_data + // query data, make sure the database and table are created before + sql := "SELECT ts, current, location FROM power.meters limit 100" + rows, err := db.Query(sql) + if err != nil { + log.Fatalf("Failed to query data from power.meters, sql: %s, ErrMessage: %s\n", sql, err.Error()) + } + for rows.Next() { + // Add your data processing logic here + var ( + ts time.Time + current float32 + location string + ) + err = rows.Scan(&ts, &current, &location) + if err != nil { + log.Fatalf("Failed to scan data, sql: %s, ErrMessage: %s\n", sql, err) + } + fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location) + } + // ANCHOR_END: select_data +} diff --git a/docs/examples/go/stmt/native/main.go b/docs/examples/go/stmt/native/main.go index 46db6eee46..c61cdba57e 100644 --- a/docs/examples/go/stmt/native/main.go +++ b/docs/examples/go/stmt/native/main.go @@ -23,22 +23,22 @@ func main() { // prepare database and table _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db,
host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = db.Exec("USE power") if err != nil { - log.Fatalln("Failed to use db, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to use database power, ErrMessage: " + err.Error()) } _, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatalln("Failed to create table, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create stable meters, ErrMessage: " + err.Error()) } // prepare statement sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)" stmt := db.Stmt() err = stmt.Prepare(sql) if err != nil { - log.Fatalln("Failed to prepare sql, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error()) } for i := 1; i <= numOfSubTable; i++ { tableName := fmt.Sprintf("d_bind_%d", i) @@ -46,7 +46,7 @@ func main() { // set tableName and tags err = stmt.SetTableNameWithTags(tableName, tags) if err != nil { - log.Fatalln("Failed to set table name and tags, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to set table name and tags, tableName: " + tableName + "; ErrMessage: " + err.Error()) } // bind column data current := time.Now() @@ -58,18 +58,18 @@ func main() { AddFloat(rand.Float32()) err = stmt.BindRow(row) if err != nil { - log.Fatalln("Failed to bind params, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to bind params, ErrMessage: " + err.Error()) } } // add batch err = stmt.AddBatch() if err != nil { - log.Fatalln("Failed to add batch, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to add batch, ErrMessage: " + err.Error()) } // execute batch err = stmt.Execute() if err != nil { - log.Fatalln("Failed to exec, host: " + host + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to exec, ErrMessage: " + err.Error()) } // get affected rows affected := stmt.GetAffectedRows() diff --git a/docs/examples/go/stmt/ws/main.go b/docs/examples/go/stmt/ws/main.go index ae224c704f..289a2f1d57 100644 --- a/docs/examples/go/stmt/ws/main.go +++ b/docs/examples/go/stmt/ws/main.go @@ -27,11 +27,11 @@ func main() { // prepare database and table _, err = db.Exec("CREATE DATABASE IF NOT EXISTS power") if err != nil { - log.Fatalln("Failed to create db, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create database power, ErrMessage: " + err.Error()) } _, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") if err != nil { - log.Fatalln("Failed to create table, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to create stable power.meters, ErrMessage: " + err.Error()) } config := stmt.NewConfig(fmt.Sprintf("ws://%s:6041", host), 0) @@ -49,11 +49,11 @@ func main() { sql := "INSERT INTO ? USING meters TAGS(?,?) 
VALUES (?,?,?,?)" stmt, err := connector.Init() if err != nil { - log.Fatalln("Failed to init stmt, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatalln("Failed to init stmt, sql: " + sql + ", ErrMessage: " + err.Error()) } err = stmt.Prepare(sql) if err != nil { - log.Fatal("Failed to prepare sql, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to prepare sql, sql: " + sql + ", ErrMessage: " + err.Error()) } for i := 1; i <= numOfSubTable; i++ { tableName := fmt.Sprintf("d_bind_%d", i) @@ -63,12 +63,12 @@ func main() { // set tableName err = stmt.SetTableName(tableName) if err != nil { - log.Fatal("Failed to set table name, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to set table name, tableName: " + tableName + "; ErrMessage: " + err.Error()) } // set tags err = stmt.SetTags(tags, tagsType) if err != nil { - log.Fatal("Failed to set tags, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to set tags, ErrMessage: " + err.Error()) } // bind column data current := time.Now() @@ -80,18 +80,18 @@ func main() { columnData[3] = param.NewParam(1).AddFloat(rand.Float32()) err = stmt.BindParam(columnData, columnType) if err != nil { - log.Fatal("Failed to bind params, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to bind params, ErrMessage: " + err.Error()) } } // add batch err = stmt.AddBatch() if err != nil { - log.Fatal("Failed to add batch, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to add batch, ErrMessage: " + err.Error()) } // execute batch err = stmt.Exec() if err != nil { - log.Fatal("Failed to exec, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to exec, ErrMessage: " + err.Error()) } // get affected rows affected := stmt.GetAffectedRows() @@ -100,6 +100,6 @@ func main() { } err = stmt.Close() if err != nil { - log.Fatal("Failed to close stmt, url: " + taosDSN + "; ErrMessage: " + err.Error()) + log.Fatal("Failed to close stmt, ErrMessage: " + err.Error()) } } diff --git a/docs/examples/go/tmq/native/main.go b/docs/examples/go/tmq/native/main.go index 4bf2fb4a60..8d667abc18 100644 --- a/docs/examples/go/tmq/native/main.go +++ b/docs/examples/go/tmq/native/main.go @@ -1,134 +1,179 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - "github.com/taosdata/driver-go/v3/af/tmq" - tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" - _ "github.com/taosdata/driver-go/v3/taosSql" -) - -var done = make(chan struct{}) - -func main() { - // init env - taosDSN := "root:taosdata@tcp(127.0.0.1:6030)/" - conn, err := sql.Open("taosSql", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer func() { - conn.Close() - }() - initEnv(conn) - // ANCHOR: create_consumer - // create consumer - groupID := "group1" - clientID := "client1" - host := "127.0.0.1" - consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "auto.offset.reset": "latest", - "msg.with.table.name": "true", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "group.id": groupID, - "client.id": clientID, - }) - if err != nil { - log.Fatalln("Failed to create native consumer, host : " + host + "; ErrMessage: " + err.Error()) - } - log.Println("Create consumer successfully, host: " + host + ", groupId: " + groupID + ", clientId: " + clientID) - - // ANCHOR_END: create_consumer - // ANCHOR: subscribe - err = 
consumer.Subscribe("topic_meters", nil) - if err != nil { - log.Fatalln("Failed to subscribe, host : " + host + "; ErrMessage: " + err.Error()) - } - log.Println("Subscribe topics successfully") - for i := 0; i < 50; i++ { - ev := consumer.Poll(100) - if ev != nil { - switch e := ev.(type) { - case *tmqcommon.DataMessage: - // process your data here - fmt.Printf("data:%v\n", e) - // ANCHOR: commit_offset - // commit offset - _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) - if err != nil { - log.Fatalln("Failed to commit offset, host : " + host + "; ErrMessage: " + err.Error()) - } - log.Println("Commit offset manually successfully.") - // ANCHOR_END: commit_offset - case tmqcommon.Error: - fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, host : " + host + "; ErrMessage: " + err.Error()) - } - } - } - // ANCHOR_END: subscribe - // ANCHOR: seek - // get assignment - partitions, err := consumer.Assignment() - if err != nil { - log.Fatal("Failed to get assignment; ErrMessage: " + err.Error()) - } - fmt.Println("Now assignment:", partitions) - for i := 0; i < len(partitions); i++ { - // seek to the beginning - err = consumer.Seek(tmqcommon.TopicPartition{ - Topic: partitions[i].Topic, - Partition: partitions[i].Partition, - Offset: 0, - }, 0) - if err != nil { - log.Fatalln("Seek example failed; ErrMessage: " + err.Error()) - } - } - fmt.Println("Assignment seek to beginning successfully") - // ANCHOR_END: seek - // ANCHOR: close - // unsubscribe - err = consumer.Unsubscribe() - if err != nil { - log.Fatal("Failed to unsubscribe consumer. ErrMessage: " + err.Error()) - } - // close consumer - err = consumer.Close() - if err != nil { - log.Fatal("Failed to close consumer. ErrMessage: " + err.Error()) - } - // ANCHOR_END: close - <-done -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Failed to create database. ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Failed to create stable. ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") - if err != nil { - log.Fatal("Failed to create topic. ErrMessage: " + err.Error()) - } - go func() { - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Failed to insert data. 
ErrMessage: " + err.Error()) - } - } - done <- struct{}{} - }() -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosSql" +) + +var done = make(chan struct{}) +var groupID string +var clientID string +var host string +var topic string + +func main() { + // init env + taosDSN := "root:taosdata@tcp(127.0.0.1:6030)/" + conn, err := sql.Open("taosSql", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer func() { + conn.Close() + }() + initEnv(conn) + // ANCHOR: create_consumer + // create consumer + groupID = "group1" + clientID = "client1" + host = "127.0.0.1" + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "latest", + "msg.with.table.name": "true", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "group.id": groupID, + "client.id": clientID, + }) + if err != nil { + log.Fatalf( + "Failed to create native consumer, host: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + host, + groupID, + clientID, + err.Error(), + ) + } + log.Printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s\n", host, groupID, clientID) + + // ANCHOR_END: create_consumer + // ANCHOR: subscribe + topic = "topic_meters" + err = consumer.Subscribe(topic, nil) + if err != nil { + log.Fatalf( + "Failed to subscribe topic_meters, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + log.Println("Subscribe topics successfully") + for i := 0; i < 50; i++ { + ev := consumer.Poll(100) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + // process your data here + fmt.Printf("data:%v\n", e) + // ANCHOR: commit_offset + // commit offset + _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) + if err != nil { + log.Fatalf( + "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, offset %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + e.TopicPartition, + err.Error(), + ) + } + log.Println("Commit offset manually successfully.") + // ANCHOR_END: commit_offset + case tmqcommon.Error: + log.Fatalf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", topic, groupID, clientID, e.Error()) + } + } + } + // ANCHOR_END: subscribe + // ANCHOR: seek + // get assignment + partitions, err := consumer.Assignment() + if err != nil { + log.Fatalf("Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", topic, groupID, clientID, err.Error()) + } + fmt.Println("Now assignment:", partitions) + for i := 0; i < len(partitions); i++ { + // seek to the beginning + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + log.Fatalf( + "Failed to execute seek offset, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + topic, + groupID, + clientID, + partitions[i].Partition, + 0, + err.Error(), + ) + } + } + fmt.Println("Assignment seek to beginning successfully") + // ANCHOR_END: seek + // ANCHOR: close + // unsubscribe + err = consumer.Unsubscribe() + if err != nil { + log.Fatalf( + "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + 
groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer unsubscribed successfully.") + // close consumer + err = consumer.Close() + if err != nil { + log.Fatalf( + "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer closed successfully.") + // ANCHOR_END: close + <-done +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") + if err != nil { + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) + } + go func() { + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) + } + } + done <- struct{}{} + }() +} diff --git a/docs/examples/go/tmq/ws/main.go b/docs/examples/go/tmq/ws/main.go index c6be0326b5..9ea4d72b39 100644 --- a/docs/examples/go/tmq/ws/main.go +++ b/docs/examples/go/tmq/ws/main.go @@ -1,139 +1,197 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "time" - - "github.com/taosdata/driver-go/v3/common" - tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" - _ "github.com/taosdata/driver-go/v3/taosWS" - "github.com/taosdata/driver-go/v3/ws/tmq" -) - -var done = make(chan struct{}) - -func main() { - // init env - taosDSN := "root:taosdata@ws(127.0.0.1:6041)/" - conn, err := sql.Open("taosWS", taosDSN) - if err != nil { - log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error()) - } - defer func() { - conn.Close() - }() - initEnv(conn) - // ANCHOR: create_consumer - // create consumer - wsUrl := "ws://127.0.0.1:6041" - groupID := "group1" - clientID := "client1" - consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ - "ws.url": wsUrl, - "ws.message.channelLen": uint(0), - "ws.message.timeout": common.DefaultMessageTimeout, - "ws.message.writeWait": common.DefaultWriteWait, - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "auto.offset.reset": "latest", - "msg.with.table.name": "true", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "group.id": groupID, - "client.id": clientID, - }) - if err != nil { - log.Fatalln("Failed to create websocket consumer, host : " + wsUrl + "; ErrMessage: " + err.Error()) - } - log.Println("Create consumer successfully, host: " + wsUrl + ", groupId: " + groupID + ", clientId: " + clientID) - - // ANCHOR_END: create_consumer - // ANCHOR: subscribe - err = consumer.Subscribe("topic_meters", nil) - if err != nil { - log.Fatalln("Failed to subscribe, host : " + wsUrl + "; ErrMessage: " + err.Error()) - } - log.Println("Subscribe topics successfully") - for i := 0; i < 50; i++ { - ev := consumer.Poll(100) - if ev != nil { - switch e := ev.(type) { - case *tmqcommon.DataMessage: - // process your data here - fmt.Printf("data:%v\n", e) - // ANCHOR: commit_offset - // commit offset - _, err = 
consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) - if err != nil { - log.Fatalln("Failed to commit offset, host : " + wsUrl + "; ErrMessage: " + err.Error()) - } - log.Println("Commit offset manually successfully.") - // ANCHOR_END: commit_offset - case tmqcommon.Error: - fmt.Printf("%% Error: %v: %v\n", e.Code(), e) - log.Fatalln("Failed to poll data, host : " + wsUrl + "; ErrMessage: " + err.Error()) - } - } - } - // ANCHOR_END: subscribe - // ANCHOR: seek - // get assignment - partitions, err := consumer.Assignment() - if err != nil { - log.Fatal("Failed to get assignment; ErrMessage: " + err.Error()) - } - fmt.Println("Now assignment:", partitions) - for i := 0; i < len(partitions); i++ { - // seek to the beginning - err = consumer.Seek(tmqcommon.TopicPartition{ - Topic: partitions[i].Topic, - Partition: partitions[i].Partition, - Offset: 0, - }, 0) - if err != nil { - log.Fatalln("Seek example failed; ErrMessage: " + err.Error()) - } - } - fmt.Println("Assignment seek to beginning successfully") - // ANCHOR_END: seek - // ANCHOR: close - // unsubscribe - err = consumer.Unsubscribe() - if err != nil { - log.Fatal("Failed to unsubscribe consumer. ErrMessage: " + err.Error()) - } - // close consumer - err = consumer.Close() - if err != nil { - log.Fatal("Failed to close consumer. ErrMessage: " + err.Error()) - } - // ANCHOR_END: close - <-done -} - -func initEnv(conn *sql.DB) { - _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") - if err != nil { - log.Fatal("Failed to create database. ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") - if err != nil { - log.Fatal("Failed to create stable. ErrMessage: " + err.Error()) - } - _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") - if err != nil { - log.Fatal("Failed to create topic. ErrMessage: " + err.Error()) - } - go func() { - for i := 0; i < 10; i++ { - time.Sleep(time.Second) - _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") - if err != nil { - log.Fatal("Failed to insert data. 
ErrMessage: " + err.Error()) - } - } - done <- struct{}{} - }() -} +package main + +import ( + "database/sql" + "fmt" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +var done = make(chan struct{}) +var groupID string +var clientID string +var host string +var topic string + +func main() { + // init env + taosDSN := "root:taosdata@ws(127.0.0.1:6041)/" + conn, err := sql.Open("taosWS", taosDSN) + if err != nil { + log.Fatalln("Failed to connect to " + taosDSN + ", ErrMessage: " + err.Error()) + } + defer func() { + conn.Close() + }() + initEnv(conn) + // ANCHOR: create_consumer + // create consumer + wsUrl := "ws://127.0.0.1:6041" + groupID = "group1" + clientID = "client1" + host = "127.0.0.1" + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": wsUrl, + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "auto.offset.reset": "latest", + "msg.with.table.name": "true", + "enable.auto.commit": "true", + "auto.commit.interval.ms": "1000", + "group.id": groupID, + "client.id": clientID, + }) + if err != nil { + log.Fatalf( + "Failed to create websocket consumer, host: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + host, + groupID, + clientID, + err.Error(), + ) + } + log.Printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s\n", host, groupID, clientID) + + // ANCHOR_END: create_consumer + // ANCHOR: subscribe + topic = "topic_meters" + err = consumer.Subscribe(topic, nil) + if err != nil { + log.Fatalf( + "Failed to subscribe topic_meters, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + log.Println("Subscribe topics successfully") + for i := 0; i < 50; i++ { + ev := consumer.Poll(100) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + // process your data here + fmt.Printf("data:%v\n", e) + // ANCHOR: commit_offset + // commit offset + _, err = consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition}) + if err != nil { + log.Fatalf( + "Failed to commit offset, topic: %s, groupId: %s, clientId: %s, offset %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + e.TopicPartition, + err.Error(), + ) + } + log.Println("Commit offset manually successfully.") + // ANCHOR_END: commit_offset + case tmqcommon.Error: + log.Fatalf( + "Failed to poll data, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + e.Error(), + ) + } + } + } + // ANCHOR_END: subscribe + // ANCHOR: seek + // get assignment + partitions, err := consumer.Assignment() + if err != nil { + log.Fatalf( + "Failed to get assignment, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Now assignment:", partitions) + for i := 0; i < len(partitions); i++ { + // seek to the beginning + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + log.Fatalf( + "Failed to seek offset, topic: %s, groupId: %s, clientId: %s, partition: %d, offset: %d, ErrMessage: %s\n", + topic, + groupID, + clientID, + partitions[i].Partition, + 0, + err.Error(), + ) + } + } + fmt.Println("Assignment seek 
to beginning successfully") + // ANCHOR_END: seek + // ANCHOR: close + // unsubscribe + err = consumer.Unsubscribe() + if err != nil { + log.Fatalf( + "Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer unsubscribed successfully.") + // close consumer + err = consumer.Close() + if err != nil { + log.Fatalf( + "Failed to close consumer, topic: %s, groupId: %s, clientId: %s, ErrMessage: %s\n", + topic, + groupID, + clientID, + err.Error(), + ) + } + fmt.Println("Consumer closed successfully.") + // ANCHOR_END: close + <-done +} + +func initEnv(conn *sql.DB) { + _, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power") + if err != nil { + log.Fatal("Failed to create database, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))") + if err != nil { + log.Fatal("Failed to create stable, ErrMessage: " + err.Error()) + } + _, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters") + if err != nil { + log.Fatal("Failed to create topic, ErrMessage: " + err.Error()) + } + go func() { + for i := 0; i < 10; i++ { + time.Sleep(time.Second) + _, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)") + if err != nil { + log.Fatal("Failed to insert data, ErrMessage: " + err.Error()) + } + } + done <- struct{}{} + }() +} diff --git a/docs/examples/java/pom.xml b/docs/examples/java/pom.xml index e7969bd4eb..35fe5f280c 100644 --- a/docs/examples/java/pom.xml +++ b/docs/examples/java/pom.xml @@ -25,6 +25,24 @@ 3.3.0 + + + + com.alibaba + druid + 1.1.17 + + + + com.zaxxer + HikariCP + 3.2.0 + + + org.locationtech.jts + jts-core + 1.19.0 + junit junit diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java b/docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java similarity index 98% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java rename to docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java index 842abb4086..52e294fcf7 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/AbsConsumerLoop.java +++ b/docs/examples/java/src/main/java/com/taos/example/AbsConsumerLoop.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.tmq.ConsumerRecord; import com.taosdata.jdbc.tmq.ConsumerRecords; @@ -33,7 +33,7 @@ public abstract class AbsConsumerLoop { config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); config.setProperty("client.id", "client1"); - config.setProperty("value.deserializer", "com.taosdata.example.AbsConsumerLoop$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.AbsConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); try { this.consumer = new TaosConsumer<>(config); diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java similarity index 69% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java rename to 
docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java index 34629fb8d2..a399f3aa6a 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; @@ -19,8 +19,10 @@ public class ConsumerLoopFull { static private Connection connection; static private Statement statement; static private volatile boolean stopThread = false; + static private String groupId = "group1"; + static private String clientId = "client1"; - public static TaosConsumer getConsumer() throws SQLException { + public static TaosConsumer getConsumer() throws Exception { // ANCHOR: create_consumer Properties config = new Properties(); config.setProperty("td.connect.type", "jni"); @@ -30,33 +32,38 @@ public class ConsumerLoopFull { config.setProperty("enable.auto.commit", "true"); config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); - config.setProperty("client.id", "1"); + config.setProperty("client.id", "client1"); config.setProperty("td.connect.user", "root"); config.setProperty("td.connect.pass", "taosdata"); - config.setProperty("value.deserializer", "com.taosdata.example.ConsumerLoopFull$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.ConsumerLoopFull$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); try { TaosConsumer consumer= new TaosConsumer<>(config); - System.out.println("Create consumer successfully, host: " + config.getProperty("bootstrap.servers") + ", groupId: " + config.getProperty("group.id") + ", clientId: " + config.getProperty("client.id")); + System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id")); return consumer; - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create native consumer, host: " + config.getProperty("bootstrap.servers") + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); } catch (Exception ex) { - System.out.println("Failed to create native consumer, host: " + config.getProperty("bootstrap.servers") - + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create native consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id"), + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production.
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: create_consumer } public static void pollExample(TaosConsumer consumer) throws SQLException { // ANCHOR: poll_data_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -65,27 +72,29 @@ public class ConsumerLoopFull { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process the data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } } - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to poll data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); } catch (Exception ex) { - System.out.println("Failed to poll data, ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: poll_data_code_piece } public static void seekExample(TaosConsumer consumer) throws SQLException { // ANCHOR: consumer_seek + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -100,13 +109,17 @@ public class ConsumerLoopFull { consumer.seekToBeginning(assignment); System.out.println("Assignment seek to beginning successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Seek example failed; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); } catch (Exception ex) { - System.out.println("Seek example failed; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to seek offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: consumer_seek } @@ -114,15 +127,14 @@ public class ConsumerLoopFull { public static void commitExample(TaosConsumer consumer) throws SQLException { // ANCHOR: commit_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); for (int i = 0; i < 50; i++) { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process your data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } if (!records.isEmpty()) { @@ -131,13 +143,17 @@ public class ConsumerLoopFull { System.out.println("Commit offset manually successfully."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute consumer functions. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); } catch (Exception ex) { - System.out.println("Failed to execute consumer functions. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to commit offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: commit_code_piece } @@ -150,13 +166,17 @@ public class ConsumerLoopFull { // unsubscribe the consumer consumer.unsubscribe(); System.out.println("Consumer unsubscribed successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to unsubscribe consumer. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); } catch (Exception ex) { - System.out.println("Failed to unsubscribe consumer. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } finally { // close the consumer @@ -238,9 +258,14 @@ public class ConsumerLoopFull { i++; Thread.sleep(1); } - } catch (SQLException ex) { - System.out.println("Failed to insert data to power.meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to insert data to power.meters", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? 
"ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } } @@ -250,9 +275,14 @@ public class ConsumerLoopFull { statement.executeUpdate("USE power"); statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (SQLException ex) { - System.out.println("Failed to create db and table, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create db and table", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } } @@ -311,7 +341,7 @@ public class ConsumerLoopFull { try { prepareData(); } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); return; } catch (Exception ex) { System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java similarity index 98% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java rename to docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index 84d84f062b..a59bfc282f 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java similarity index 88% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java rename to docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java index 73901aba49..5903cabd3c 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerOffsetSeek.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerOffsetSeek.java @@ -1,20 +1,14 @@ -package com.taosdata.example; +package com.taos.example; -import com.alibaba.fastjson.JSON; -import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.ConsumerRecords; import com.taosdata.jdbc.tmq.TaosConsumer; import com.taosdata.jdbc.tmq.TopicPartition; -import java.sql.Connection; -import java.sql.DriverManager; import java.sql.SQLException; -import java.sql.Statement; import java.time.Duration; import java.util.Collections; import java.util.Map; import java.util.Properties; -import java.util.Scanner; public class ConsumerOffsetSeek { @@ -31,7 +25,7 @@ public class ConsumerOffsetSeek { 
config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); config.setProperty("client.id", "1"); - config.setProperty("value.deserializer", "com.taosdata.example.AbsConsumerLoop$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.AbsConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); // ANCHOR: consumer_seek diff --git a/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java new file mode 100644 index 0000000000..a366efd419 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java @@ -0,0 +1,36 @@ +package com.taosdata.example; + +import com.alibaba.druid.pool.DruidDataSource; + +import java.sql.Connection; +import java.sql.Statement; + +public class DruidDemo { + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + String url = "jdbc:TAOS://127.0.0.1:6030/log"; + + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("SELECT SERVER_VERSION()"); + + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + // query or insert + // ... + + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool +} diff --git a/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java new file mode 100644 index 0000000000..036125e7ea --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java @@ -0,0 +1,190 @@ +package com.taos.example; + +import com.taosdata.jdbc.TSDBPreparedStatement; +import org.locationtech.jts.geom.*; +import org.locationtech.jts.io.ByteOrderValues; +import org.locationtech.jts.io.ParseException; +import org.locationtech.jts.io.WKBReader; +import org.locationtech.jts.io.WKBWriter; + +import java.sql.*; +import java.util.ArrayList; +import java.util.Properties; + +public class GeometryDemo { + private static String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + private Connection connection; + + public static void main(String[] args) throws SQLException { + for (int i = 0; i < args.length; i++) { + if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) + host = args[++i]; + } + if (host == null) { + printHelp(); + } + GeometryDemo demo = new GeometryDemo(); + demo.init(); + demo.createDatabase(); + demo.useDatabase(); + demo.dropTable(); + demo.createTable(); + + demo.insert(); + demo.stmtInsert(); + demo.select(); + + demo.dropTable(); + demo.close(); + } + + private void init() { + final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + // get connection + try { + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + 
properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + connection = DriverManager.getConnection(url, properties); + if (connection != null) + System.out.println("[ OK ] Connection established."); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void createDatabase() { + String sql = "create database if not exists " + dbName; + execute(sql); + } + + private void useDatabase() { + String sql = "use " + dbName; + execute(sql); + } + + private void dropTable() { + final String sql = "drop table if exists " + dbName + "." + tbName + ""; + execute(sql); + } + + private void createTable() { + final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int, location geometry(50))"; + execute(sql); + } + + private void insert() { + final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity, location) values(now, 20.5, 34, 'POINT(1 2)')"; + execute(sql); + } + + private void stmtInsert() throws SQLException { + TSDBPreparedStatement preparedStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." + tbName + " values (?, ?, ?, ?)"); + + long current = System.currentTimeMillis(); + ArrayList tsList = new ArrayList<>(); + tsList.add(current); + tsList.add(current + 1); + preparedStatement.setTimestamp(0, tsList); + ArrayList tempList = new ArrayList<>(); + tempList.add(20.1F); + tempList.add(21.2F); + preparedStatement.setFloat(1, tempList); + ArrayList humList = new ArrayList<>(); + humList.add(30); + humList.add(31); + preparedStatement.setInt(2, humList); + + + ArrayList list = new ArrayList<>(); + GeometryFactory gf = new GeometryFactory(); + Point p1 = gf.createPoint(new Coordinate(1,2)); + p1.setSRID(1234); + + // NOTE: TDengine current version only support 2D dimension and little endian byte order + WKBWriter w = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true); + byte[] wkb = w.write(p1); + list.add(wkb); + + Coordinate[] coordinates = { new Coordinate(10, 20), + new Coordinate(30, 40)}; + LineString lineString = gf.createLineString(coordinates); + lineString.setSRID(2345); + byte[] wkb2 = w.write(lineString); + list.add(wkb2); + + preparedStatement.setGeometry(3, list, 50); + + preparedStatement.columnDataAddBatch(); + preparedStatement.columnDataExecuteBatch(); + } + + private void select() { + final String sql = "select * from " + dbName + "." + tbName; + executeQuery(sql); + } + + private void close() { + try { + if (connection != null) { + this.connection.close(); + System.out.println("connection closed."); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void executeQuery(String sql) { + long start = System.currentTimeMillis(); + try (Statement statement = connection.createStatement()) { + ResultSet resultSet = statement.executeQuery(sql); + long end = System.currentTimeMillis(); + printSql(sql, true, (end - start)); + + while (resultSet.next()){ + byte[] result1 = resultSet.getBytes(4); + WKBReader reader = new WKBReader(); + Geometry g1 = reader.read(result1); + System.out.println("GEO OBJ: " + g1 + ", SRID: " + g1.getSRID()); + } + + } catch (SQLException e) { + long end = System.currentTimeMillis(); + printSql(sql, false, (end - start)); + e.printStackTrace(); + } catch (ParseException e) { + throw new RuntimeException(e); + } + } + + private void printSql(String sql, boolean succeed, long cost) { + System.out.println("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); + } + + private void execute(String sql) { + long start = System.currentTimeMillis(); + try (Statement statement = connection.createStatement()) { + boolean execute = statement.execute(sql); + long end = System.currentTimeMillis(); + printSql(sql, true, (end - start)); + } catch (SQLException e) { + long end = System.currentTimeMillis(); + printSql(sql, false, (end - start)); + e.printStackTrace(); + } + } + + private static void printHelp() { + System.out.println("Usage: java -jar JDBCDemo.jar -host "); + System.exit(0); + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java new file mode 100644 index 0000000000..50b20fdb0c --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java @@ -0,0 +1,37 @@ +package com.taosdata.example; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; + +import java.sql.Connection; +import java.sql.Statement; + +public class HikariDemo { + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + // connection pool configurations + config.setMinimumIdle(10); // minimum number of idle connection + config.setMaximumPoolSize(10); // maximum number of connection in the pool + config.setConnectionTimeout(30000); // maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("SELECT SERVER_VERSION()"); // validation query + + HikariDataSource dataSource = new HikariDataSource(config); // create datasource + + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + // query or insert + // ... + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool +} diff --git a/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java index 42ac7bde85..4950654cf6 100644 --- a/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/JNIConnectExample.java @@ -8,31 +8,33 @@ import java.util.Properties; import com.taosdata.jdbc.TSDBDriver; public class JNIConnectExample { -// ANCHOR: main -public static void main(String[] args) throws SQLException { - // use - // String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; - // if you want to connect a specified database named "dbName". 
- String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; - Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + // ANCHOR: main + public static void main(String[] args) throws Exception { + // use + // String jdbcUrl = + // "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata"; + // if you want to connect a specified database named "dbName". + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { - System.out.println("Connected to " + jdbcUrl + " successfully."); + try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) { + System.out.println("Connected to " + jdbcUrl + " successfully."); - // you can use the connection for execute SQL here + // you can use the connection for execute SQL here - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); - throw ex; + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } } + // ANCHOR_END: main } -// ANCHOR_END: main -} - diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java new file mode 100644 index 0000000000..f9b30f52d9 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java @@ -0,0 +1,125 @@ +package com.taos.example; + +import com.taosdata.jdbc.AbstractStatement; + +import java.sql.*; +import java.util.Properties; + +public class JdbcBasicDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + try (Connection connection = DriverManager.getConnection(url, properties)) { + + + if (connection != null) { + System.out.println("[ OK ] Connection established."); + } else { + System.out.println("[ ERR ] Connection can not be established."); + return; + } + + Statement stmt = connection.createStatement(); + +// ANCHOR: create_db_and_table +// create database + stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power"); + +// use database + stmt.executeUpdate("USE power"); + +// create table + stmt.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); +// ANCHOR_END: create_db_and_table + +// ANCHOR: insert_data +// insert data + String insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; + int affectedRows = stmt.executeUpdate(insertQuery); + System.out.println("insert " + affectedRows + " rows."); +// ANCHOR_END: insert_data + + +// ANCHOR: query_data +// query data + ResultSet resultSet = stmt.executeQuery("SELECT * FROM meters"); + + Timestamp ts; + float current; + String location; + while (resultSet.next()) { + ts = resultSet.getTimestamp(1); + current = resultSet.getFloat(2); + location = resultSet.getString("location"); + + System.out.printf("%s, %f, %s\n", ts, current, location); + } +// ANCHOR_END: query_data + +// ANCHOR: with_reqid + AbstractStatement aStmt = (AbstractStatement) connection.createStatement(); + aStmt.execute("CREATE DATABASE IF NOT EXISTS power", 1L); + aStmt.executeUpdate("USE power", 2L); + try (ResultSet rs = aStmt.executeQuery("SELECT * FROM meters limit 1", 3L)) { + while (rs.next()) { + Timestamp timestamp = rs.getTimestamp(1); + System.out.println("timestamp = " + timestamp); + } + } + aStmt.close(); +// ANCHOR_END: with_reqid + + + String sql = "SELECT * FROM meters limit 2;"; + +// ANCHOR: jdbc_exception + try (Statement statement = connection.createStatement(); + // executeQuery + ResultSet tempResultSet = statement.executeQuery(sql)) { + + 
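+            // the Statement and ResultSet are closed automatically by this try-with-resources block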
// print result + printResult(tempResultSet); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute statement, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: jdbc_exception + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Error Code: " + ex.getErrorCode()); + System.out.println("Message: " + ex.getMessage()); + } + } + + private static void printResult(ResultSet resultSet) throws SQLException { + Util.printResult(resultSet); + } + +} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java similarity index 77% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java rename to docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java index ca50c4ca77..0293f15b53 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java @@ -1,6 +1,4 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; +package com.taos.example; import java.sql.*; import java.util.Properties; @@ -35,13 +33,13 @@ public class JdbcCreatDBDemo { rowsAffected = stmt.executeUpdate("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); // you can check rowsAffected here System.out.println("Create stable power.meters successfully, rowsAffected: " + rowsAffected); - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create db and table, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to create db and table, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create database power or stable meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } // ANCHOR_END: create_db_and_table diff --git a/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java new file mode 100644 index 0000000000..5c3599d819 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java @@ -0,0 +1,56 @@ +package com.taos.example; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class JdbcInsertDataDemo { + private static final String host = "localhost"; + private static final String dbName = "test"; + private static final String tbName = "weather"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + public static void main(String[] args) throws SQLException { + + final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + +// get connection + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); +// ANCHOR: insert_data + // insert data, please make sure the database and table are created before + String insertQuery = "INSERT INTO " + + "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; + try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); + Statement stmt = connection.createStatement()) { + + int affectedRows = stmt.executeUpdate(insertQuery); + // you can check affectedRows here + System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, sql: %s, %sErrMessage: %s%n", + insertQuery, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; + } +// ANCHOR_END: insert_data + } +} diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java similarity index 66% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java rename to docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java index ea6d8346c6..4b28a7de28 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java @@ -1,6 +1,4 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; +package com.taos.example; import java.sql.*; import java.util.Properties; @@ -24,10 +22,11 @@ public class JdbcQueryDemo { properties.setProperty("timezone", "UTC-8"); System.out.println("get connection starting..."); // ANCHOR: query_data + String sql = "SELECT ts, current, location FROM power.meters limit 100"; try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); Statement stmt = connection.createStatement(); // query data, make sure the database and table are created before - ResultSet resultSet = stmt.executeQuery("SELECT ts, current, location FROM power.meters limit 100")) { + ResultSet resultSet = stmt.executeQuery(sql)) { Timestamp ts; float current; @@ -41,19 +40,16 @@ public class JdbcQueryDemo { // you can check data here System.out.printf("ts: %s, current: %f, location: %s %n", ts, current, location); } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to query data from power.meters, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to query data from power.meters, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to query data from power.meters, sql: %s, %sErrMessage: %s%n", + sql, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } // ANCHOR_END: query_data } - - private static void printResult(ResultSet resultSet) throws SQLException { - Util.printResult(resultSet); - } - } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java b/docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java similarity index 79% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java rename to docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java index 7dd99c58dc..ee0ff45962 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.AbstractStatement; @@ -45,12 +45,13 @@ public class JdbcReqIdDemo { } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute sql with reqId: " + reqId + ", url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to execute sql with reqId: " + reqId + ", url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to execute sql with reqId: %s, %sErrMessage: %s%n", reqId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); throw ex; } // ANCHOR_END: with_reqid diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java similarity index 86% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java rename to docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java index ab93e324ea..8a8019e54e 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.TSDBPreparedStatement; @@ -68,12 +68,13 @@ public class ParameterBindingBasicDemo { // you can check exeResult here System.out.println("Successfully inserted " + (numOfSubTable * numOfRow) + " rows to power.meters."); } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java new file mode 100644 index 0000000000..5eb0cf0a61 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java @@ -0,0 +1,325 @@ +package com.taos.example; + +import com.taosdata.jdbc.TSDBPreparedStatement; +import com.taosdata.jdbc.utils.StringUtils; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +// ANCHOR: para_bind +public class ParameterBindingFullDemo { + + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 50; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))", + "create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))", + "create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))", + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS://" + host + ":6030/"; + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + + init(conn); + + bindInteger(conn); + bindFloat(conn); + bindBoolean(conn); + bindBytes(conn); + bindString(conn); + bindVarbinary(conn); + bindGeometry(conn); + + clean(conn); + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw ex; + } catch (Exception ex){ + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + clean(conn); + try (Statement stmt = conn.createStatement()) { + stmt.execute("create database if not exists test_parabind"); + stmt.execute("use test_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + private static void clean(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_parabind"); + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) 
values(?,?,?,?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(3, random.nextLong()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setByte(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setShort(2, f2List); + + ArrayList f3List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f3List.add(random.nextInt(Integer.MAX_VALUE)); + pstmt.setInt(3, f3List); + + ArrayList f4List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f4List.add(random.nextLong()); + pstmt.setLong(4, f4List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute column + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class); + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(0, random.nextFloat()); + pstmt.setTagDouble(1, random.nextDouble()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(random.nextFloat()); + pstmt.setFloat(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f2List.add(random.nextDouble()); + pstmt.setDouble(2, f2List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + // close if no try-with-catch statement is used + pstmt.close(); + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) 
values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t3_" + i); + // set tags + pstmt.setTagBoolean(0, random.nextBoolean()); + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) + f1List.add(random.nextBoolean()); + pstmt.setBoolean(1, f1List); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindBytes(Connection conn) throws SQLException { + String sql = "insert into ? using stable4 tags(?) values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t4_" + i); + // set tags + pstmt.setTagString(0, new String("abc")); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + f1List.add(new String("abc")); + } + pstmt.setString(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindString(Connection conn) throws SQLException { + String sql = "insert into ? using stable5 tags(?) values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t5_" + i); + // set tags + pstmt.setTagNString(0, "California.SanFrancisco"); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + f1List.add("California.LosAngeles"); + } + pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindVarbinary(Connection conn) throws SQLException { + String sql = "insert into ? using stable6 tags(?) 
values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t6_" + i); + // set tags + byte[] bTag = new byte[]{0,2,3,4,5}; + bTag[0] = (byte) i; + pstmt.setTagVarbinary(0, bTag); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + byte[] v = new byte[]{0,2,3,4,5,6}; + v[0] = (byte)j; + f1List.add(v); + } + pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } + + private static void bindGeometry(Connection conn) throws SQLException { + String sql = "insert into ? using stable7 tags(?) values(?,?)"; + + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + + byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040"); + byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040"); + List listGeo = new ArrayList<>(); + listGeo.add(g1); + listGeo.add(g2); + + for (int i = 1; i <= 2; i++) { + // set table name + pstmt.setTableName("t7_" + i); + // set tags + pstmt.setTagGeometry(0, listGeo.get(i - 1)); + + // set columns + ArrayList tsList = new ArrayList<>(); + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) + tsList.add(current + j); + pstmt.setTimestamp(0, tsList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < numOfRow; j++) { + f1List.add(listGeo.get(i - 1)); + } + pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE); + + // add column + pstmt.columnDataAddBatch(); + } + // execute + pstmt.columnDataExecuteBatch(); + } + } +} +// ANCHOR_END: para_bind diff --git a/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java index b1ec31ee86..441d478b07 100644 --- a/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/RESTConnectExample.java @@ -5,22 +5,24 @@ import java.sql.DriverManager; import java.sql.SQLException; public class RESTConnectExample { -// ANCHOR: main -public static void main(String[] args) throws SQLException { - String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata"; - try (Connection conn = DriverManager.getConnection(jdbcUrl)){ - System.out.println("Connected to " + jdbcUrl + " successfully."); + // ANCHOR: main + public static void main(String[] args) throws Exception { + String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + System.out.println("Connected to " + jdbcUrl + " successfully."); - // you can use the connection for execute SQL here + // you can use the connection for execute SQL here - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to connect to " + jdbcUrl + "; 
ErrMessage: " + ex.getMessage()); - throw ex; + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; + } } + // ANCHOR_END: main } -// ANCHOR_END: main -} \ No newline at end of file diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java b/docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java similarity index 79% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java rename to docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java index d1f6eb3cea..818dda90d8 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java +++ b/docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.AbstractConnection; import com.taosdata.jdbc.enums.SchemalessProtocolType; @@ -26,12 +26,13 @@ public class SchemalessJniTest { conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED); System.out.println("Inserted data with schemaless successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java similarity index 79% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java rename to docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java index 6432c8100e..08f66c2227 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java +++ b/docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.taosdata.jdbc.AbstractConnection; import com.taosdata.jdbc.enums.SchemalessProtocolType; @@ -26,12 +26,13 @@ public class SchemalessWsTest { conn.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS); conn.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS); System.out.println("Inserted data with schemaless successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data with schemaless, host:" + host + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data with schemaless, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/docs/examples/java/src/main/java/com/taos/example/Util.java b/docs/examples/java/src/main/java/com/taos/example/Util.java new file mode 100644 index 0000000000..79a1d8628b --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/Util.java @@ -0,0 +1,25 @@ +package com.taos.example; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public class Util { + public static void printResult(ResultSet resultSet) throws SQLException { + ResultSetMetaData metaData = resultSet.getMetaData(); + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String columnLabel = metaData.getColumnLabel(i); + System.out.printf(" %s |", columnLabel); + } + System.out.println(); + System.out.println("-------------------------------------------------------------"); + while (resultSet.next()) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + String value = resultSet.getString(i); + System.out.printf("%s, ", value); + } + System.out.println(); + } + } + +} diff --git a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java index 21f184b45a..afe74ace83 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java @@ -9,7 +9,7 @@ import java.util.Properties; public class WSConnectExample { // ANCHOR: main - public static void main(String[] args) throws SQLException { + public static void main(String[] args) throws Exception { // use // String jdbcUrl = // "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata&batchfetch=true"; @@ -25,14 +25,14 @@ public class WSConnectExample { // you can use the connection for execute SQL here - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed - // exceptions info - System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " - + ex.getMessage()); - throw ex; } catch (Exception ex) { - System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to connect to %s, %sErrMessage: %s%n", + jdbcUrl, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java similarity index 79% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java rename to docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java index 29013014b9..eab8df06b9 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java @@ -1,10 +1,8 @@ -package com.taosdata.example; +package com.taos.example; -import com.taosdata.jdbc.TSDBPreparedStatement; import com.taosdata.jdbc.ws.TSWSPreparedStatement; import java.sql.*; -import java.util.ArrayList; import java.util.Random; // ANCHOR: para_bind @@ -47,12 +45,13 @@ public class WSParameterBindingBasicDemo { System.out.println("Successfully inserted " + exeResult.length + " rows to power.meters."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert to table meters using stmt, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); throw ex; } } diff --git a/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java new file mode 100644 index 0000000000..ec94f2ded6 --- /dev/null +++ b/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java @@ -0,0 +1,180 @@ +package com.taos.example; + +import com.taosdata.jdbc.ws.TSWSPreparedStatement; + +import java.sql.*; +import java.util.Random; + +// ANCHOR: para_bind +public class WSParameterBindingFullDemo { + private static final String host = "127.0.0.1"; + private static final Random random = new Random(System.currentTimeMillis()); + private static final int BINARY_COLUMN_SIZE = 30; + private static final String[] schemaList = { + "create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)", + "create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)", + "create table stable3(ts timestamp, f1 bool) tags(t1 bool)", + "create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))", + "create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))" + }; + private static final int numOfSubTable = 10, numOfRow = 10; + + public static void main(String[] args) throws SQLException { + + String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true"; + + try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) { + + init(conn); + + bindInteger(conn); + + bindFloat(conn); + + bindBoolean(conn); + + bindBytes(conn); + + bindString(conn); + + } catch (SQLException ex) { + // handle any errors, please refer to the JDBC specifications for detailed exceptions info + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + throw ex; + } catch (Exception ex){ + System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage()); + throw ex; + } + } + + private static void init(Connection conn) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_ws_parabind"); + stmt.execute("create database if not exists test_ws_parabind"); + stmt.execute("use test_ws_parabind"); + for (int i = 0; i < schemaList.length; i++) { + stmt.execute(schemaList[i]); + } + } + } + + private static void bindInteger(Connection conn) throws SQLException { + String sql = "insert into ? using stable1 tags(?,?,?,?) 
values(?,?,?,?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t1_" + i); + // set tags + pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE)); + pstmt.setTagLong(4, random.nextLong()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE)))); + pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE)))); + pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE)); + pstmt.setLong(5, random.nextLong()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindFloat(Connection conn) throws SQLException { + String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)"; + + try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t2_" + i); + // set tags + pstmt.setTagFloat(1, random.nextFloat()); + pstmt.setTagDouble(2, random.nextDouble()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setFloat(2, random.nextFloat()); + pstmt.setDouble(3, random.nextDouble()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBoolean(Connection conn) throws SQLException { + String sql = "insert into ? using stable3 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t3_" + i); + // set tags + pstmt.setTagBoolean(1, random.nextBoolean()); + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setBoolean(2, random.nextBoolean()); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindBytes(Connection conn) throws SQLException { + String sql = "insert into ? using stable4 tags(?) values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t4_" + i); + // set tags + pstmt.setTagString(1, new String("abc")); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(1, new Timestamp(current + j)); + pstmt.setString(2, "abc"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } + + private static void bindString(Connection conn) throws SQLException { + String sql = "insert into ? using stable5 tags(?) 
values(?,?)"; + + try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) { + + for (int i = 1; i <= numOfSubTable; i++) { + // set table name + pstmt.setTableName("t5_" + i); + // set tags + pstmt.setTagNString(1, "California.SanFrancisco"); + + // set columns + long current = System.currentTimeMillis(); + for (int j = 0; j < numOfRow; j++) { + pstmt.setTimestamp(0, new Timestamp(current + j)); + pstmt.setNString(1, "California.SanFrancisco"); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + } + } +} +// ANCHOR_END: para_bind diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java similarity index 69% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java rename to docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java index 241ab2df76..6db65f47f2 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java @@ -1,4 +1,4 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; @@ -6,19 +6,23 @@ import com.taosdata.jdbc.tmq.*; import java.sql.*; import java.time.Duration; -import java.util.*; +import java.util.Collections; +import java.util.List; +import java.util.Properties; +import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; // ANCHOR: consumer_demo public class WsConsumerLoopFull { static private Connection connection; static private Statement statement; static private volatile boolean stopThread = false; + static private String groupId = "group1"; + static private String clientId = "clinet1"; - public static TaosConsumer getConsumer() throws SQLException { + public static TaosConsumer getConsumer() throws Exception { // ANCHOR: create_consumer Properties config = new Properties(); config.setProperty("td.connect.type", "ws"); @@ -28,33 +32,38 @@ public class WsConsumerLoopFull { config.setProperty("enable.auto.commit", "true"); config.setProperty("auto.commit.interval.ms", "1000"); config.setProperty("group.id", "group1"); - config.setProperty("client.id", "1"); + config.setProperty("client.id", "clinet1"); config.setProperty("td.connect.user", "root"); config.setProperty("td.connect.pass", "taosdata"); - config.setProperty("value.deserializer", "com.taosdata.example.WsConsumerLoopFull$ResultDeserializer"); + config.setProperty("value.deserializer", "com.taos.example.WsConsumerLoopFull$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); try { TaosConsumer consumer= new TaosConsumer<>(config); - System.out.println("Create consumer successfully, host: " + config.getProperty("bootstrap.servers") + ", groupId: " + config.getProperty("group.id") + ", clientId: " + config.getProperty("client.id")); + System.out.printf("Create consumer successfully, host: %s, groupId: %s, clientId: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id")); return consumer; - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to create websocket consumer, host: " + 
config.getProperty("bootstrap.servers") + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); } catch (Exception ex) { - System.out.println("Failed to create websocket consumer, host: " + config.getProperty("bootstrap.servers") - + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create websocket consumer, host: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + config.getProperty("bootstrap.servers"), + config.getProperty("group.id"), + config.getProperty("client.id"), + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: create_consumer } public static void pollExample(TaosConsumer consumer) throws SQLException { // ANCHOR: poll_data_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -63,27 +72,29 @@ public class WsConsumerLoopFull { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process the data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } } - - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to poll data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); } catch (Exception ex) { - System.out.println("Failed to poll data, ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to poll data", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to poll data, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: poll_data_code_piece } public static void seekExample(TaosConsumer consumer) throws SQLException { // ANCHOR: consumer_seek + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - // subscribe to the topics consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); @@ -98,13 +109,17 @@ public class WsConsumerLoopFull { consumer.seekToBeginning(assignment); System.out.println("Assignment seek to beginning successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Seek example failed; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); } catch (Exception ex) { - System.out.println("Seek example failed; ErrMessage: " + ex.getMessage()); - throw new SQLException("seek example failed", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to seek offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } // ANCHOR_END: consumer_seek } @@ -112,15 +127,14 @@ public class WsConsumerLoopFull { public static void commitExample(TaosConsumer consumer) throws SQLException { // ANCHOR: commit_code_piece + List topics = Collections.singletonList("topic_meters"); try { - List topics = Collections.singletonList("topic_meters"); - consumer.subscribe(topics); for (int i = 0; i < 50; i++) { ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); for (ConsumerRecord record : records) { ResultBean bean = record.value(); - // process your data here + // Add your data processing logic here System.out.println("data: " + JSON.toJSONString(bean)); } if (!records.isEmpty()) { @@ -129,13 +143,17 @@ public class WsConsumerLoopFull { System.out.println("Commit offset manually successfully."); } } - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to execute consumer functions. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); } catch (Exception ex) { - System.out.println("Failed to execute consumer functions. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to execute consumer functions", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to commit offset, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } // ANCHOR_END: commit_code_piece } @@ -148,13 +166,17 @@ public class WsConsumerLoopFull { // unsubscribe the consumer consumer.unsubscribe(); System.out.println("Consumer unsubscribed successfully."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to unsubscribe consumer. ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); } catch (Exception ex) { - System.out.println("Failed to unsubscribe consumer. ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to unsubscribe consumer", ex); + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to unsubscribe consumer, topic: %s, groupId: %s, clientId: %s, %sErrMessage: %s%n", + topics.get(0), + groupId, + clientId, + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } finally { // close the consumer @@ -236,9 +258,14 @@ public class WsConsumerLoopFull { i++; Thread.sleep(1); } - } catch (SQLException ex) { - System.out.println("Failed to insert data to power.meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to insert data to power.meters", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to insert data to power.meters, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. + ex.printStackTrace(); + throw ex; } } @@ -248,9 +275,14 @@ public class WsConsumerLoopFull { statement.executeUpdate("USE power"); statement.executeUpdate("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))"); statement.executeUpdate("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters"); - } catch (SQLException ex) { - System.out.println("Failed to create db and table, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw new SQLException("Failed to create db and table", ex); + } catch (Exception ex) { + // please refer to the JDBC specifications for detailed exceptions info + System.out.printf("Failed to create db and table, %sErrMessage: %s%n", + ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "", + ex.getMessage()); + // Print stack trace for context in examples. Use logging in production. 
+ ex.printStackTrace(); + throw ex; } } @@ -309,7 +341,7 @@ public class WsConsumerLoopFull { try { prepareData(); } catch (SQLException ex) { - System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); + System.out.println("Failed to prepare data, ErrCode:" + ex.getErrorCode() + ", ErrMessage: " + ex.getMessage()); return; } catch (Exception ex) { System.out.println("Failed to prepare data, ErrMessage: " + ex.getMessage()); @@ -334,8 +366,7 @@ public class WsConsumerLoopFull { consumer.unsubscribe(); unsubscribeExample(consumer); - System.out.println("unsubscribeExample executed successfully."); - + System.out.println("unsubscribeExample executed successfully"); } catch (SQLException ex) { System.out.println("Failed to poll data from topic_meters, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); return; diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java similarity index 93% rename from examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java rename to docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java index bf352f3b06..70e29503f8 100644 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java @@ -1,9 +1,12 @@ -package com.taosdata.example; +package com.taos.example; import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; -import java.sql.*; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; import java.util.Properties; import java.util.Scanner; diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index f24156d8b1..6a45c5fd5b 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -40,12 +40,12 @@ public class TestAll { } @Test - public void testJNIConnect() throws SQLException { + public void testJNIConnect() throws Exception { JNIConnectExample.main(args); } @Test - public void testRestConnect() throws SQLException { + public void testRestConnect() throws Exception { RESTConnectExample.main(args); } diff --git a/docs/examples/node/websocketexample/line_example.js b/docs/examples/node/websocketexample/line_example.js index 7e76db2677..ac3083d358 100644 --- a/docs/examples/node/websocketexample/line_example.js +++ b/docs/examples/node/websocketexample/line_example.js @@ -28,7 +28,7 @@ async function test() { console.log("Inserted data with schemaless successfully.") } catch (err) { - console.error("Failed to insert data with schemaless, url:"+ dsn +", ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to insert data with schemaless, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { @@ -40,4 +40,4 @@ async function test() { taos.destroy(); } } -test() \ No newline at end of file +test() diff --git a/docs/examples/node/websocketexample/sql_example.js b/docs/examples/node/websocketexample/sql_example.js index e36bd037ed..5dc8ba6021 100644 --- a/docs/examples/node/websocketexample/sql_example.js +++ b/docs/examples/node/websocketexample/sql_example.js @@ -9,11 +9,11 @@ async function createConnect() { conf.setUser('root'); conf.setPwd('taosdata'); 
conf.setDb('power'); - conn = await taos.sqlConnect(conf); + conn = await taos.sqlConnect(conf); console.log("Connected to " + dsn + " successfully."); - return conn; + return conn; } catch (err) { - console.log("Failed to connect to " + dns + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.log("Failed to connect to " + dsn + ", ErrCode: " + err.code + ", ErrMessage: " + err.message); throw err; } @@ -29,13 +29,13 @@ async function createDbAndTable() { await wsSql.exec('CREATE DATABASE IF NOT EXISTS power'); console.log("Create database power successfully."); // create table - await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' + - '(_ts timestamp, current float, voltage int, phase float) ' + - 'TAGS (location binary(64), groupId int);'); + await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' + + '(_ts timestamp, current float, voltage int, phase float) ' + + 'TAGS (location binary(64), groupId int);'); console.log("Create stable power.meters successfully"); } catch (err) { - console.error("Failed to create db and table, url:" + dns + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to create database power or stable meters, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsSql) { await wsSql.close(); @@ -51,18 +51,18 @@ async function insertData() { try { wsSql = await createConnect(); let insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; + "power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " + + "VALUES " + + "(NOW + 1a, 10.30000, 219, 0.31000) " + + "(NOW + 2a, 12.60000, 218, 0.33000) " + + "(NOW + 3a, 12.30000, 221, 0.31000) " + + "power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " + + "VALUES " + + "(NOW + 1a, 10.30000, 218, 0.25000) "; taosResult = await wsSql.exec(insertQuery); console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to power.meters."); } catch (err) { - console.error("Failed to insert data to power.meters, url:" + dsn + "; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to insert data to power.meters, sql: ${insertQuery}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsSql) { await wsSql.close(); @@ -75,21 +75,22 @@ async function insertData() { async function queryData() { let wsRows = null; let wsSql = null; + let sql = 'SELECT ts, current, location FROM power.meters limit 100'; try { wsSql = await createConnect(); - wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100'); + wsRows = await wsSql.query(sql); while (await wsRows.next()) { let row = wsRows.getData(); - console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); + console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); } } catch (err) { - console.error("Failed to query data from power.meters, url:" + dsn + " ; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to query data from power.meters, sql: ${sql}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { await wsRows.close(); - } + } if (wsSql) { await wsSql.close(); } 
@@ -107,16 +108,16 @@ async function sqlWithReqid() { wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100', reqId); while (await wsRows.next()) { let row = wsRows.getData(); - console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); + console.log('ts: ' + row[0] + ', current: ' + row[1] + ', location: ' + row[2]); } } catch (err) { - console.error("Failed to execute sql with reqId: " + reqId + ", ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to query data from power.meters, reqId: ${reqId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (wsRows) { await wsRows.close(); - } + } if (wsSql) { await wsSql.close(); } @@ -129,7 +130,7 @@ async function test() { await insertData(); await queryData(); await sqlWithReqid(); - taos.destroy(); + taos.destroy(); } -test() \ No newline at end of file +test() diff --git a/docs/examples/node/websocketexample/stmt_example.js b/docs/examples/node/websocketexample/stmt_example.js index d159de6ef8..e3bb3c4dda 100644 --- a/docs/examples/node/websocketexample/stmt_example.js +++ b/docs/examples/node/websocketexample/stmt_example.js @@ -3,7 +3,7 @@ const taos = require("@tdengine/websocket"); let db = 'power'; let stable = 'meters'; let numOfSubTable = 10; -let numOfRow = 10; +let numOfRow = 10; let dsn = 'ws://localhost:6041' function getRandomInt(min, max) { min = Math.ceil(min); @@ -59,7 +59,7 @@ async function prepare() { } } catch (err) { - console.error("Failed to insert to table meters using stmt, url:" + dsn + "ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to insert to table meters using stmt, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (stmt) { diff --git a/docs/examples/node/websocketexample/tmq_example.js b/docs/examples/node/websocketexample/tmq_example.js index ff676fa972..5097402e6a 100644 --- a/docs/examples/node/websocketexample/tmq_example.js +++ b/docs/examples/node/websocketexample/tmq_example.js @@ -3,12 +3,16 @@ const taos = require("@tdengine/websocket"); // ANCHOR: create_consumer const db = 'power'; const stable = 'meters'; -const topics = ['power_meters_topic']; const url = 'ws://localhost:6041'; +const topic = 'topic_meters' +const topics = [topic]; +const groupId = "group1"; +const clientId = "client1"; + async function createConsumer() { let groupId = "group1"; - let clientId = "1"; + let clientId = "client1"; let configMap = new Map([ [taos.TMQConstants.GROUP_ID, groupId], [taos.TMQConstants.CLIENT_ID, clientId], @@ -23,11 +27,11 @@ async function createConsumer() { conn = await taos.tmqConnect(configMap); console.log(`Create consumer successfully, host: ${url}, groupId: ${groupId}, clientId: ${clientId}`) return conn; - }catch (err) { - console.log("Failed to create websocket consumer, ErrCode:" + err.code + "; ErrMessage: " + err.message); + } catch (err) { + console.error(`Failed to create websocket consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } - + } // ANCHOR_END: create_consumer @@ -38,7 +42,7 @@ async function prepare() { conf.setDb('power'); const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`; const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`; - + let wsSql = await taos.sqlConnect(conf); await wsSql.exec(createDB); await wsSql.exec(createStable); @@ -60,13 
+64,14 @@ async function subscribe(consumer) { for (let i = 0; i < 50; i++) { let res = await consumer.poll(100); for (let [key, value] of res) { + // Add your data processing logic here console.log(`data: ${key} ${value}`); } - consumer.commit(); + await consumer.commit(); console.log("Commit offset manually successfully."); - } + } } catch (err) { - console.error("Failed to poll data; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to poll data, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } // ANCHOR_END: commit @@ -77,17 +82,18 @@ async function test() { let consumer = null; try { await prepare(); - let consumer = await createConsumer() - await subscribe(consumer) + consumer = await createConsumer() + await subscribe(consumer) await consumer.unsubscribe(); console.log("Consumer unsubscribed successfully."); } catch (err) { - console.error("Failed to unsubscribe consume, ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to unsubscribe consumer, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (consumer) { await consumer.close(); + console.log("Consumer closed successfully."); } taos.destroy(); } diff --git a/docs/examples/node/websocketexample/tmq_seek_example.js b/docs/examples/node/websocketexample/tmq_seek_example.js index da22217e14..b2bd569d92 100644 --- a/docs/examples/node/websocketexample/tmq_seek_example.js +++ b/docs/examples/node/websocketexample/tmq_seek_example.js @@ -2,7 +2,11 @@ const taos = require("@tdengine/websocket"); const db = 'power'; const stable = 'meters'; -const topics = ['power_meters_topic']; +const topic = 'topic_meters' +const topics = [topic]; +const groupId = "group1"; +const clientId = "client1"; + // ANCHOR: create_consumer async function createConsumer() { @@ -19,7 +23,7 @@ async function createConsumer() { try { return await taos.tmqConnect(configMap); } catch (err) { - console.log(err); + console.error(err); throw err; } @@ -31,7 +35,7 @@ async function prepare() { conf.setUser('root'); conf.setPwd('taosdata'); conf.setDb('power'); - const createDB = `CREATE DATABASE IF NOT EXISTS POWER ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`; + const createDB = `CREATE DATABASE IF NOT EXISTS ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`; const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`; let wsSql = await taos.sqlConnect(conf); @@ -45,7 +49,7 @@ async function prepare() { for (let i = 0; i < 10; i++) { await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`); } - wsSql.Close(); + await wsSql.close(); } // ANCHOR: subscribe @@ -55,11 +59,12 @@ async function subscribe(consumer) { for (let i = 0; i < 50; i++) { let res = await consumer.poll(100); for (let [key, value] of res) { + // Add your data processing logic here console.log(`data: ${key} ${value}`); } } } catch (err) { - console.error("Failed to poll data; ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to poll data, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); throw err; } @@ -83,7 +88,7 @@ async function test() { console.log("Assignment seek to beginning 
successfully"); } catch (err) { - console.error("Seek example failed, ErrCode:" + err.code + "; ErrMessage: " + err.message); + console.error(`Failed to seek offset, topic: ${topic}, groupId: ${groupId}, clientId: ${clientId}, ErrCode: ${err.code}, ErrMessage: ${err.message}`); } finally { if (consumer) { diff --git a/docs/examples/python/connect_example.py b/docs/examples/python/connect_example.py index 1f01a04e4c..ce8b306024 100644 --- a/docs/examples/python/connect_example.py +++ b/docs/examples/python/connect_example.py @@ -14,11 +14,11 @@ def create_connection(): ) print(f"Connected to {host}:{port} successfully."); except Exception as err: - print(f"Failed to connect to {host}:{port} ; ErrMessage:{err}") + print(f"Failed to connect to {host}:{port} , ErrMessage:{err}") finally: if conn: conn.close() if __name__ == "__main__": - create_connection() \ No newline at end of file + create_connection() diff --git a/docs/examples/python/connect_rest_example.py b/docs/examples/python/connect_rest_example.py index bd65ce7970..02daa60b2e 100644 --- a/docs/examples/python/connect_rest_example.py +++ b/docs/examples/python/connect_rest_example.py @@ -12,7 +12,7 @@ def create_connection(): print(f"Connected to {url} successfully."); except Exception as err: - print(f"Failed to connect to {url} ; ErrMessage:{err}") + print(f"Failed to connect to {url} , ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/connect_websocket_examples.py b/docs/examples/python/connect_websocket_examples.py index 79e96dd09c..56d208f5db 100644 --- a/docs/examples/python/connect_websocket_examples.py +++ b/docs/examples/python/connect_websocket_examples.py @@ -14,7 +14,7 @@ def create_connection(): ) print(f"Connected to {host}:{port} successfully."); except Exception as err: - print(f"Failed to connect to {host}:{port} ; ErrMessage:{err}") + print(f"Failed to connect to {host}:{port} , ErrMessage:{err}") return conn # ANCHOR_END: connect diff --git a/docs/examples/python/create_db_native.py b/docs/examples/python/create_db_native.py index 1a98a242f2..34dabfabe2 100644 --- a/docs/examples/python/create_db_native.py +++ b/docs/examples/python/create_db_native.py @@ -20,7 +20,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/create_db_rest.py b/docs/examples/python/create_db_rest.py index 2387b80195..4b98c991a3 100644 --- a/docs/examples/python/create_db_rest.py +++ b/docs/examples/python/create_db_rest.py @@ -19,7 +19,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, url:{url} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/create_db_ws.py b/docs/examples/python/create_db_ws.py index 7749ba617f..ddbacb4b1f 100644 --- a/docs/examples/python/create_db_ws.py +++ b/docs/examples/python/create_db_ws.py @@ -20,7 +20,7 @@ try: print(f"Create stable power.meters successfully, rowsAffected: {rowsAffected}"); except Exception as err: - print(f"Failed to create db and table, db addrr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to create database power or stable meters, 
ErrMessage:{err}") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_native.py b/docs/examples/python/insert_native.py index 5f1b6fbc21..19dafa3f23 100644 --- a/docs/examples/python/insert_native.py +++ b/docs/examples/python/insert_native.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_rest.py b/docs/examples/python/insert_rest.py index d8e07323a9..526c3a6a69 100644 --- a/docs/examples/python/insert_rest.py +++ b/docs/examples/python/insert_rest.py @@ -20,7 +20,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, url:{url} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql:{sql}, ErrMessage:{err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/insert_ws.py b/docs/examples/python/insert_ws.py index 0e1f74e2cf..886dda1c10 100644 --- a/docs/examples/python/insert_ws.py +++ b/docs/examples/python/insert_ws.py @@ -21,7 +21,7 @@ try: print(f"Successfully inserted {affectedRows} rows to power.meters.") except Exception as err: - print(f"Failed to insert data to power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert data to power.meters, sql: {sql}, ErrMessage: {err}.") finally: if conn: conn.close() diff --git a/docs/examples/python/query_native.py b/docs/examples/python/query_native.py index df19819de7..072807986e 100644 --- a/docs/examples/python/query_native.py +++ b/docs/examples/python/query_native.py @@ -7,15 +7,15 @@ try: port=port, user="root", password="taosdata") - - result = conn.query("SELECT ts, current, location FROM power.meters limit 100") + sql = "SELECT ts, current, location FROM power.meters limit 100" + result = conn.query(sql) # Get data from result as list of tuple data = result.fetch_all() for row in data: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to query data from power.meters, db addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}") finally: if conn: - conn.close() \ No newline at end of file + conn.close() diff --git a/docs/examples/python/query_rest.py b/docs/examples/python/query_rest.py index de16ace8be..85a70fd382 100644 --- a/docs/examples/python/query_rest.py +++ b/docs/examples/python/query_rest.py @@ -7,11 +7,11 @@ try: user="root", password="taosdata", timeout=30) - - result = client.sql(f"SELECT ts, current, location FROM power.meters limit 100") + sql = f"SELECT ts, current, location FROM power.meters limit 100" + result = client.sql(sql) if result["data"]: for row in result["data"]: print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}") except Exception as err: - print(f"Failed to query data from power.meters, url:{url} ; ErrMessage:{err}") + print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}") diff --git a/docs/examples/python/query_ws.py b/docs/examples/python/query_ws.py index 8d177b167a..afab438ad9 100644 --- a/docs/examples/python/query_ws.py +++ b/docs/examples/python/query_ws.py @@ -8,13 +8,13 @@ try: password="taosdata", host=host, port=port) - - result = conn.query("SELECT ts, current, 
location FROM power.meters limit 100")
+    sql = "SELECT ts, current, location FROM power.meters limit 100"
+    result = conn.query(sql)
     for row in result:
         print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}")
 
 except Exception as err:
-    print(f"Failed to query data from power.meters, db addr:{host}:{port} ; ErrMessage:{err}")
+    print(f"Failed to query data from power.meters, sql: {sql}, ErrMessage:{err}")
 finally:
     if conn:
         conn.close()
diff --git a/docs/examples/python/reqid_native.py b/docs/examples/python/reqid_native.py
index 8f5bb3538c..7f16093835 100644
--- a/docs/examples/python/reqid_native.py
+++ b/docs/examples/python/reqid_native.py
@@ -17,7 +17,7 @@ try:
         print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}")
 
 except Exception as err:
-    print(f"Failed to execute sql with reqId:{reqId}, db addr:{host}:{port} ; ErrMessage:{err}")
+    print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
 
 finally:
     if conn:
diff --git a/docs/examples/python/reqid_rest.py b/docs/examples/python/reqid_rest.py
index 76ba735234..570e671092 100644
--- a/docs/examples/python/reqid_rest.py
+++ b/docs/examples/python/reqid_rest.py
@@ -15,4 +15,4 @@ try:
         print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}")
 
 except Exception as err:
-    print(f"Failed to execute sql with reqId:{reqId}, url:{url} ; ErrMessage:{err}")
+    print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
diff --git a/docs/examples/python/reqid_ws.py b/docs/examples/python/reqid_ws.py
index 691dde710f..7c74104169 100644
--- a/docs/examples/python/reqid_ws.py
+++ b/docs/examples/python/reqid_ws.py
@@ -18,7 +18,7 @@ try:
         print(f"ts: {row[0]}, current: {row[1]}, location: {row[2]}")
 
 except Exception as err:
-    print(f"Failed to execute sql with reqId:{reqId}, db addr:{host}:{port} ; ErrMessage:{err}")
+    print(f"Failed to execute sql with reqId:{reqId}, ErrMessage:{err}")
 finally:
     if conn:
         conn.close()
diff --git a/docs/examples/python/schemaless_native.py b/docs/examples/python/schemaless_native.py
index 0cb4d1c94d..96d8f3177f 100644
--- a/docs/examples/python/schemaless_native.py
+++ b/docs/examples/python/schemaless_native.py
@@ -34,7 +34,7 @@ try:
     )
     print("Inserted data with schemaless successfully.");
 except Exception as err:
-    print(f"Failed to insert data with schemaless, addr: {host}:{port} ErrMessage:{err}")
+    print(f"Failed to insert data with schemaless, ErrMessage:{err}")
 finally:
     if conn:
         conn.close()
diff --git a/docs/examples/python/schemaless_ws.py b/docs/examples/python/schemaless_ws.py
index f03f78b10f..39de55393d 100644
--- a/docs/examples/python/schemaless_ws.py
+++ b/docs/examples/python/schemaless_ws.py
@@ -1,19 +1,21 @@
 import taosws
+host = "localhost"
+port = 6041
 
 def prepare():
     conn = None
     try:
         conn = taosws.connect(user="root",
                               password="taosdata",
-                              host="localhost",
-                              port=6041)
+                              host=host,
+                              port=port)
 
         # create database
         rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS power")
         assert rowsAffected == 0
 
     except Exception as err:
-        print(f"Failed to create db and table, err:{err}")
+        print(f"Failed to create db and table, db addr:{host}:{port}, ErrMessage:{err}")
         raise err
     finally:
         if conn:
@@ -32,8 +34,7 @@ def schemaless_insert():
     jsonDemo = [
         '{"metric": "metric_json","timestamp": 1626846400,"value": 10.3, "tags": {"groupid": 2, "location": "California.SanFrancisco", "id": "d1001"}}'
     ]
-    host = "localhost"
-    port = 6041
+
    try:
         conn = taosws.connect(user="root",
                               password="taosdata",
@@ -66,7 +67,7 @@ def schemaless_insert():
     )
     print("Inserted data with schemaless
successfully."); except Exception as err: - print(f"Failed to insert data with schemaless, addr: {host}:{port} ErrMessage:{err}") + print(f"Failed to insert data with schemaless, ErrMessage:{err}") raise err finally: @@ -76,6 +77,6 @@ def schemaless_insert(): if __name__ == "__main__": try: prepare() - schemaless_insert + schemaless_insert() except Exception as err: - print(f"Failed to insert data with schemaless, err:{err}") \ No newline at end of file + print(f"Failed to insert data with schemaless, err:{err}") diff --git a/docs/examples/python/stmt_native.py b/docs/examples/python/stmt_native.py index ea0cf9c626..a1af7d1dd7 100644 --- a/docs/examples/python/stmt_native.py +++ b/docs/examples/python/stmt_native.py @@ -7,8 +7,8 @@ numOfRow = 10 conn = None stmt = None -host="localhost", -port=6030, +host="localhost" +port=6030 try: conn = taos.connect( user="root", @@ -56,7 +56,7 @@ try: print(f"Successfully inserted to power.meters.") except Exception as err: - print(f"Failed to insert to table meters using stmt, addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert to table meters using stmt, ErrMessage:{err}") finally: if stmt: stmt.close() diff --git a/docs/examples/python/stmt_ws.py b/docs/examples/python/stmt_ws.py index 8d992b32e6..45d9222315 100644 --- a/docs/examples/python/stmt_ws.py +++ b/docs/examples/python/stmt_ws.py @@ -13,8 +13,8 @@ port=6041 try: conn = taosws.connect(user="root", password="taosdata", - host="localhost", - port=6041) + host=host, + port=port) conn.execute("CREATE DATABASE IF NOT EXISTS power") conn.execute("USE power") @@ -61,7 +61,7 @@ try: print(f"Successfully inserted to power.meters.") except Exception as err: - print(f"Failed to insert to table meters using stmt, addr:{host}:{port} ; ErrMessage:{err}") + print(f"Failed to insert to table meters using stmt, ErrMessage:{err}") finally: if stmt: stmt.close() diff --git a/docs/examples/python/tmq_native.py b/docs/examples/python/tmq_native.py index 64e447384c..d4ccfda138 100644 --- a/docs/examples/python/tmq_native.py +++ b/docs/examples/python/tmq_native.py @@ -1,17 +1,24 @@ +#!/usr/bin/python3 import taos +db = "power" +topic = "topic_meters" +user = "root" +password = "taosdata" +host = "localhost" +port = 6030 +groupId = "group1" +clientId = "1" +tdConnWsScheme = "ws" +autoOffsetReset = "latest" +autoCommitState = "true" +autoCommitIntv = "1000" + + def prepareMeta(): conn = None try: - conn = taos.connect( - host="localhost", - user="root", - password="taosdata", - port=6030, - ) - - db = "power" - topic = "topic_meters" + conn = taos.connect(host=host, user=user, password=password, port=port) conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}") # change database. 
same as execute "USE db" @@ -33,13 +40,13 @@ def prepareMeta(): power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco') + power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco') VALUES (NOW + 1a, 10.30000, 218, 0.25000) """ affectedRows = conn.execute(sql) print(f"Inserted into {affectedRows} rows to power.meters successfully.") except Exception as err: - print(f"Prepare insert data error, ErrMessage:{err}") + print(f"Failed to prepareMeta, host: {host}:{port}, db: {db}, topic: {topic}, ErrMessage:{err}.") raise err finally: if conn: @@ -49,28 +56,24 @@ def prepareMeta(): from taos.tmq import Consumer def create_consumer(): - host = "localhost" - port = 6030 - groupId = "group1" - clientId = "1" try: consumer = Consumer( { "group.id": groupId, "client.id": clientId, - "td.connect.user": "root", - "td.connect.pass": "taosdata", - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", - "auto.offset.reset": "latest", + "td.connect.user": user, + "td.connect.pass": password, + "enable.auto.commit": autoCommitState, + "auto.commit.interval.ms": autoCommitIntv, + "auto.offset.reset": autoOffsetReset, "td.connect.ip": host, - "td.connect.port": port, + "td.connect.port": str(port), } ) - print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); + print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}") return consumer except Exception as err: - print(f"Failed to create native consumer, host: {host}:{port} ; ErrMessage:{err}"); + print(f"Failed to create native consumer, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: create_consumer @@ -96,7 +99,7 @@ def subscribe(consumer): print(f"data: {data}") except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to poll data, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -123,7 +126,7 @@ def commit_offset(consumer): print("Commit offset manually successfully."); except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to commit offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: commit_offset @@ -136,9 +139,9 @@ def seek_offset(consumer): for partition in assignments: partition.offset = 0 consumer.seek(partition) - print(f"Assignment seek to beginning successfully"); + print(f"Assignment seek to beginning successfully.") except Exception as err: - print(f"Seek example failed; ErrMessage:{err}") + print(f"Failed to seek offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: assignment @@ -148,10 +151,11 @@ def unsubscribe(consumer): consumer.unsubscribe() print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to unsubscribe consumer. ErrMessage:{err}") + print(f"Failed to unsubscribe consumer. 
topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: if consumer: - consumer.close() + consumer.close() + print("Consumer closed successfully."); # ANCHOR_END: unsubscribe if __name__ == "__main__": @@ -162,10 +166,7 @@ if __name__ == "__main__": subscribe(consumer) seek_offset(consumer) commit_offset(consumer) - consumer.unsubscribe() - print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to stmt consumer. ErrMessage:{err}") + print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: - consumer.unsubscribe() - + unsubscribe(consumer); diff --git a/docs/examples/python/tmq_websocket_example.py b/docs/examples/python/tmq_websocket_example.py index aa9cbf6545..c9c7924661 100644 --- a/docs/examples/python/tmq_websocket_example.py +++ b/docs/examples/python/tmq_websocket_example.py @@ -1,18 +1,26 @@ #!/usr/bin/python3 import taosws -topic = "topic_meters" +db = "power" +topic = "topic_meters" +user = "root" +password = "taosdata" +host = "localhost" +port = 6041 +groupId = "group1" +clientId = "1" +tdConnWsScheme = "ws" +autoOffsetReset = "latest" +autoCommitState = "true" +autoCommitIntv = "1000" + def prepareMeta(): conn = None try: - conn = taosws.connect(user="root", - password="taosdata", - host="localhost", - port=6041) + conn = taosws.connect(user=user, password=password, host=host, port=port) - db = "power" # create database rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}") assert rowsAffected == 0 @@ -51,7 +59,7 @@ def prepareMeta(): print(f"Inserted into {affectedRows} rows to power.meters successfully.") except Exception as err: - print(f"Failed to prepareMeta ErrMessage:{err}") + print(f"Failed to prepareMeta, host: {host}:{port}, db: {db}, topic: {topic}, ErrMessage:{err}.") raise err finally: if conn: @@ -59,26 +67,22 @@ def prepareMeta(): # ANCHOR: create_consumer -def create_consumer(): - host = "localhost" - port = 6041 - groupId = "group1" - clientId = "1" +def create_consumer(): try: consumer = taosws.Consumer(conf={ - "td.connect.websocket.scheme": "ws", + "td.connect.websocket.scheme": tdConnWsScheme, "group.id": groupId, "client.id": clientId, - "auto.offset.reset": "latest", + "auto.offset.reset": autoOffsetReset, "td.connect.ip": host, "td.connect.port": port, - "enable.auto.commit": "true", - "auto.commit.interval.ms": "1000", + "enable.auto.commit": autoCommitState, + "auto.commit.interval.ms": autoCommitIntv, }) - print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}"); + print(f"Create consumer successfully, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}."); return consumer; except Exception as err: - print(f"Failed to create websocket consumer, host: {host}:{port} ; ErrMessage:{err}"); + print(f"Failed to create websocket consumer, host: {host}:{port}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}."); raise err @@ -95,10 +99,10 @@ def seek_offset(consumer): print( f"vg_id: {assign.vg_id()}, offset: {assign.offset()}, begin: {assign.begin()}, end: {assign.end()}") consumer.seek(topic, assign.vg_id(), assign.begin()) - print("Assignment seek to beginning successfully"); + print("Assignment seek to beginning successfully.") except Exception as err: - print(f"Seek example failed; ErrMessage:{err}") + print(f"Failed to seek offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err # ANCHOR_END: 
assignment @@ -116,7 +120,7 @@ def subscribe(consumer): print(f"data: {row}") except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to poll data, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -134,10 +138,10 @@ def commit_offset(consumer): # after processing the data, commit the offset manually consumer.commit(records) - print("Commit offset manually successfully."); + print("Commit offset manually successfully.") except Exception as err: - print(f"Failed to poll data, ErrMessage:{err}") + print(f"Failed to commit offset, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") raise err @@ -150,10 +154,11 @@ def unsubscribe(consumer): consumer.unsubscribe() print("Consumer unsubscribed successfully."); except Exception as err: - print(f"Failed to unsubscribe consumer. ErrMessage:{err}") + print(f"Failed to unsubscribe consumer. topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: if consumer: - consumer.close() + consumer.close() + print("Consumer closed successfully."); # ANCHOR_END: unsubscribe @@ -166,6 +171,6 @@ if __name__ == "__main__": seek_offset(consumer) commit_offset(consumer) except Exception as err: - print(f"Failed to stmt consumer. ErrorMessage:{err}") + print(f"Failed to execute consumer example, topic: {topic}, groupId: {groupId}, clientId: {clientId}, ErrMessage:{err}.") finally: - unsubscribe(consumer); \ No newline at end of file + unsubscribe(consumer) diff --git a/docs/examples/rust/nativeexample/examples/connect.rs b/docs/examples/rust/nativeexample/examples/connect.rs index 7da09ae7ec..ad2533d4c3 100644 --- a/docs/examples/rust/nativeexample/examples/connect.rs +++ b/docs/examples/rust/nativeexample/examples/connect.rs @@ -10,7 +10,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } Err(err) => { - eprintln!("Failed to connect to {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to connect to {}, ErrMessage: {}", dsn, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/createdb.rs b/docs/examples/rust/nativeexample/examples/createdb.rs index d3b2f5178b..292f13de8f 100644 --- a/docs/examples/rust/nativeexample/examples/createdb.rs +++ b/docs/examples/rust/nativeexample/examples/createdb.rs @@ -13,7 +13,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create database power successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create database power; ErrMessage: {}", err); + eprintln!("Failed to create database power, ErrMessage: {}", err); return Err(err.into()); } } @@ -25,7 +25,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create stable power.meters successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create stable power.meters; ErrMessage: {}", err); + eprintln!("Failed to create stable power.meters, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/insert.rs b/docs/examples/rust/nativeexample/examples/insert.rs index d551da436e..585cb69c52 100644 --- a/docs/examples/rust/nativeexample/examples/insert.rs +++ b/docs/examples/rust/nativeexample/examples/insert.rs @@ -9,22 +9,22 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: insert_data - match taos.exec(r#"INSERT INTO - power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 219, 0.31000) - 
(NOW + 2a, 12.60000, 218, 0.33000) - (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ + let insert_sql = r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 219, 0.31000) + (NOW + 2a, 12.60000, 218, 0.33000) + (NOW + 3a, 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 218, 0.25000) "#; + match taos.exec(insert_sql).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert data to power.meters, sql: {}, ErrMessage: {}", insert_sql, err); return Err(err.into()); } } - // ANCHOR_END: insert_data Ok(()) diff --git a/docs/examples/rust/nativeexample/examples/query.rs b/docs/examples/rust/nativeexample/examples/query.rs index 6b6fde6a31..e4f7c45521 100644 --- a/docs/examples/rust/nativeexample/examples/query.rs +++ b/docs/examples/rust/nativeexample/examples/query.rs @@ -11,7 +11,8 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: query_data // query data, make sure the database and table are created before - match taos.query("SELECT ts, current, location FROM power.meters limit 100").await{ + let sql = "SELECT ts, current, location FROM power.meters limit 100"; + match taos.query(sql).await{ Ok(mut result) => { for field in result.fields() { println!("got field: {}", field.name()); @@ -30,7 +31,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -51,6 +52,7 @@ async fn main() -> anyhow::Result<()> { location: String, } + let sql = "SELECT ts, current, location FROM power.meters limit 100"; match taos.query("SELECT ts, current, location FROM power.meters limit 100").await { Ok(mut query) => { match query.deserialize::().try_collect::>().await { @@ -64,7 +66,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, url: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -92,7 +94,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to execute sql with reqId: {}, dsn: {}; ErrMessage: {}", req_id, dsn, err); + eprintln!("Failed to execute sql with reqId: {}, ErrMessage: {}", req_id, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/schemaless.rs b/docs/examples/rust/nativeexample/examples/schemaless.rs index 88e9ef4584..75f806f54c 100644 --- a/docs/examples/rust/nativeexample/examples/schemaless.rs +++ b/docs/examples/rust/nativeexample/examples/schemaless.rs @@ -43,7 +43,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -65,7 +65,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert 
data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -96,7 +96,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/stmt.rs b/docs/examples/rust/nativeexample/examples/stmt.rs index de35ae2972..0091283a6b 100644 --- a/docs/examples/rust/nativeexample/examples/stmt.rs +++ b/docs/examples/rust/nativeexample/examples/stmt.rs @@ -20,7 +20,13 @@ async fn main() -> anyhow::Result<()> { let tags = vec![Value::Int(i as i32), Value::VarChar(format!("location_{}", i).into())]; // set table name and tags for the prepared statement. - stmt.set_tbname_tags(&table_name, &tags).await?; + match stmt.set_tbname_tags(&table_name, &tags).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", table_name, tags, err); + return Err(err.into()); + } + } for j in 0..NUM_ROWS { let values = vec![ ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), @@ -29,17 +35,29 @@ async fn main() -> anyhow::Result<()> { ColumnView::from_floats(vec![0.31 + j as f32]), ]; // bind values to the prepared statement. - stmt.bind(&values).await?; + match stmt.bind(&values).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to bind values, values:{:?}, ErrMessage: {}", values, err); + return Err(err.into()); + } + } } - stmt.add_batch().await?; + match stmt.add_batch().await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } } // execute. 
match stmt.execute().await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert to table meters using stmt, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert to table meters using stmt, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/nativeexample/examples/tmq.rs b/docs/examples/rust/nativeexample/examples/tmq.rs index 7f3bd416d1..800b66e8fe 100644 --- a/docs/examples/rust/nativeexample/examples/tmq.rs +++ b/docs/examples/rust/nativeexample/examples/tmq.rs @@ -3,6 +3,8 @@ use std::str::FromStr; use chrono::Local; use chrono::DateTime; use taos::*; +use std::thread; +use tokio::runtime::Runtime; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -53,17 +55,38 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create consumer, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to create native consumer, dsn: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", dsn, group_id, client_id, err); return Err(err.into()); } }; // ANCHOR_END: create_consumer_ac + thread::spawn(move || { + let rt = Runtime::new().unwrap(); + + rt.block_on(async { + let taos_insert = TaosBuilder::from_dsn(&dsn).unwrap().build().await.unwrap(); + for i in 0..50 { + let insert_sql = format!(r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW, 10.30000, {}, 0.31000)"#, i); + if let Err(e) = taos_insert.exec(insert_sql).await { + eprintln!("Failed to execute insert: {:?}", e); + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }); + + }).join().unwrap(); + + // ANCHOR: consume - match consumer.subscribe(["topic_meters"]).await{ + let topic = "topic_meters"; + match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to subscribe topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, err); return Err(err.into()); } } @@ -94,13 +117,14 @@ async fn main() -> anyhow::Result<()> { if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? { let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; @@ -110,14 +134,14 @@ async fn main() -> anyhow::Result<()> { consumer .stream() .try_for_each(|(offset, message)| async { - let topic = offset.topic(); // the vgroup id, like partition id in kafka. let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + println!("* in vgroup id {} of topic {}\n", vgroup_id, topic); if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? 
{ let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } @@ -125,22 +149,30 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to commit offset manually, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; // ANCHOR_END: consumer_commit_manually // ANCHOR: seek_offset - let assignments = consumer.assignments().await.unwrap(); - println!("Now assignments: {:?}", assignments); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + let error_message = format!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); + } + }; + println!("assignments: {:?}", assignments); // seek offset for topic_vec_assignment in assignments { @@ -152,7 +184,7 @@ async fn main() -> anyhow::Result<()> { let begin = assignment.begin(); let end = assignment.end(); println!( - "topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}", + "topic: {}, vgroup_id: {}, current offset: {}, begin {}, end: {}", topic, vgroup_id, current, @@ -163,18 +195,26 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("Seek example failed; ErrMessage: {}", err); + eprintln!("Failed to seek offset, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, begin: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, begin, err); return Err(err.into()); } } } let topic_assignment = consumer.topic_assignment(topic).await; - println!("Topic assignment: {:?}", topic_assignment); + println!("topic assignment: {:?}", topic_assignment); } println!("Assignment seek to beginning successfully."); // after seek offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + let error_message = format!("Failed to get assignments. 
topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); + } + }; println!("After seek offset assignments: {:?}", assignments); // ANCHOR_END: seek_offset diff --git a/docs/examples/rust/restexample/examples/connect.rs b/docs/examples/rust/restexample/examples/connect.rs index 535e265a97..7051572296 100644 --- a/docs/examples/rust/restexample/examples/connect.rs +++ b/docs/examples/rust/restexample/examples/connect.rs @@ -10,7 +10,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } Err(err) => { - eprintln!("Failed to connect to {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to connect to {}, ErrMessage: {}", dsn, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/createdb.rs b/docs/examples/rust/restexample/examples/createdb.rs index e061f1d835..896868ae15 100644 --- a/docs/examples/rust/restexample/examples/createdb.rs +++ b/docs/examples/rust/restexample/examples/createdb.rs @@ -13,7 +13,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create database power successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create database power; ErrMessage: {}", err); + eprintln!("Failed to create database power, ErrMessage: {}", err); return Err(err.into()); } } @@ -25,7 +25,7 @@ async fn main() -> anyhow::Result<()> { ]).await { Ok(afffected_rows) => println!("Create stable power.meters successfully, rowsAffected: {}", afffected_rows), Err(err) => { - eprintln!("Failed to create stable power.meters; ErrMessage: {}", err); + eprintln!("Failed to create stable power.meters, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/insert.rs b/docs/examples/rust/restexample/examples/insert.rs index 62522575ee..be85c1f82c 100644 --- a/docs/examples/rust/restexample/examples/insert.rs +++ b/docs/examples/rust/restexample/examples/insert.rs @@ -9,22 +9,22 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: insert_data - match taos.exec(r#"INSERT INTO - power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 219, 0.31000) - (NOW + 2a, 12.60000, 218, 0.33000) - (NOW + 3a, 12.30000, 221, 0.31000) - power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') - VALUES - (NOW + 1a, 10.30000, 218, 0.25000) "#).await{ + let insert_sql = r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 219, 0.31000) + (NOW + 2a, 12.60000, 218, 0.33000) + (NOW + 3a, 12.30000, 221, 0.31000) + power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') + VALUES + (NOW + 1a, 10.30000, 218, 0.25000) "#; + match taos.exec(insert_sql).await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert data to power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert data to power.meters, sql: {}, ErrMessage: {}", insert_sql, err); return Err(err.into()); } } - // ANCHOR_END: insert_data Ok(()) diff --git a/docs/examples/rust/restexample/examples/query.rs b/docs/examples/rust/restexample/examples/query.rs index 4e69f9fc97..4366efd265 100644 --- a/docs/examples/rust/restexample/examples/query.rs +++ b/docs/examples/rust/restexample/examples/query.rs @@ -11,7 +11,8 @@ async fn main() -> anyhow::Result<()> { // ANCHOR: query_data // query data, make sure the database and 
table are created before - match taos.query("SELECT ts, current, location FROM power.meters limit 100").await{ + let sql = "SELECT ts, current, location FROM power.meters limit 100"; + match taos.query(sql).await{ Ok(mut result) => { for field in result.fields() { println!("got field: {}", field.name()); @@ -30,7 +31,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -51,6 +52,7 @@ async fn main() -> anyhow::Result<()> { location: String, } + let sql = "SELECT ts, current, location FROM power.meters limit 100"; match taos.query("SELECT ts, current, location FROM power.meters limit 100").await { Ok(mut query) => { match query.deserialize::().try_collect::>().await { @@ -64,7 +66,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to query data from power.meters, url: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to query data from power.meters, sql: {}, ErrMessage: {}", sql, err); return Err(err.into()); } } @@ -92,7 +94,7 @@ async fn main() -> anyhow::Result<()> { } } Err(err) => { - eprintln!("Failed to execute sql with reqId: {}, dsn: {}; ErrMessage: {}", req_id, dsn, err); + eprintln!("Failed to execute sql with reqId: {}, ErrMessage: {}", req_id, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/schemaless.rs b/docs/examples/rust/restexample/examples/schemaless.rs index f629d66663..fe666cca6f 100644 --- a/docs/examples/rust/restexample/examples/schemaless.rs +++ b/docs/examples/rust/restexample/examples/schemaless.rs @@ -17,6 +17,15 @@ async fn main() -> anyhow::Result<()> { let client = TaosBuilder::from_dsn(dsn)?.build().await?; + let db = "power"; + + client + .exec(format!("create database if not exists {db}")) + .await?; + + // should specify database before insert + client.exec(format!("use {db}")).await?; + // SchemalessProtocol::Line let data = [ "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639", @@ -34,7 +43,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -56,7 +65,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } @@ -87,7 +96,7 @@ async fn main() -> anyhow::Result<()> { match client.put(&sml_data).await{ Ok(_) => {}, Err(err) => { - eprintln!("Failed to insert data with schemaless, host: {}; ErrMessage: {}", host, err); + eprintln!("Failed to insert data with schemaless, data:{:?}, ErrMessage: {}", data, err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/stmt.rs b/docs/examples/rust/restexample/examples/stmt.rs index 636b6780cd..4c3d5fb759 100644 --- a/docs/examples/rust/restexample/examples/stmt.rs +++ b/docs/examples/rust/restexample/examples/stmt.rs @@ -20,7 +20,13 @@ async fn main() -> anyhow::Result<()> { let tags = vec![Value::Int(i as i32), 
Value::VarChar(format!("location_{}", i).into())]; // set table name and tags for the prepared statement. - stmt.set_tbname_tags(&table_name, &tags).await?; + match stmt.set_tbname_tags(&table_name, &tags).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}", table_name, tags, err); + return Err(err.into()); + } + } for j in 0..NUM_ROWS { let values = vec![ ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]), @@ -29,17 +35,29 @@ async fn main() -> anyhow::Result<()> { ColumnView::from_floats(vec![0.31 + j as f32]), ]; // bind values to the prepared statement. - stmt.bind(&values).await?; + match stmt.bind(&values).await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to bind values, values:{:?}, ErrMessage: {}", values, err); + return Err(err.into()); + } + } } - stmt.add_batch().await?; + match stmt.add_batch().await{ + Ok(_) => {}, + Err(err) => { + eprintln!("Failed to add batch, ErrMessage: {}", err); + return Err(err.into()); + } + } } // execute. match stmt.execute().await{ Ok(affected_rows) => println!("Successfully inserted {} rows to power.meters.", affected_rows), Err(err) => { - eprintln!("Failed to insert to table meters using stmt, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to insert to table meters using stmt, ErrMessage: {}", err); return Err(err.into()); } } diff --git a/docs/examples/rust/restexample/examples/tmq.rs b/docs/examples/rust/restexample/examples/tmq.rs index 2abc0a36da..0a41025955 100644 --- a/docs/examples/rust/restexample/examples/tmq.rs +++ b/docs/examples/rust/restexample/examples/tmq.rs @@ -3,6 +3,8 @@ use std::str::FromStr; use chrono::Local; use chrono::DateTime; use taos::*; +use std::thread; +use tokio::runtime::Runtime; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -53,17 +55,38 @@ async fn main() -> anyhow::Result<()> { consumer } Err(err) => { - eprintln!("Failed to create consumer, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to create websocket consumer, dsn: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", dsn, group_id, client_id, err); return Err(err.into()); } }; // ANCHOR_END: create_consumer_ac + thread::spawn(move || { + let rt = Runtime::new().unwrap(); + + rt.block_on(async { + let taos_insert = TaosBuilder::from_dsn(&dsn).unwrap().build().await.unwrap(); + for i in 0..50 { + let insert_sql = format!(r#"INSERT INTO + power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') + VALUES + (NOW, 10.30000, {}, 0.31000)"#, i); + if let Err(e) = taos_insert.exec(insert_sql).await { + eprintln!("Failed to execute insert: {:?}", e); + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }); + + }).join().unwrap(); + + // ANCHOR: consume - match consumer.subscribe(["topic_meters"]).await{ + let topic = "topic_meters"; + match consumer.subscribe([topic]).await{ Ok(_) => println!("Subscribe topics successfully."), Err(err) => { - eprintln!("Failed to subscribe topic_meters, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to subscribe topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, err); return Err(err.into()); } } @@ -94,13 +117,14 @@ async fn main() -> anyhow::Result<()> { if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? 
{ let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to poll data; ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; @@ -110,14 +134,14 @@ async fn main() -> anyhow::Result<()> { consumer .stream() .try_for_each(|(offset, message)| async { - let topic = offset.topic(); // the vgroup id, like partition id in kafka. let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + println!("* in vgroup id {} of topic {}\n", vgroup_id, topic); if let Some(data) = message.into_data() { while let Some(block) = data.fetch_raw_block().await? { let records: Vec = block.deserialize().try_collect()?; + // Add your data processing logic here println!("** read {} records: {:#?}\n", records.len(), records); } } @@ -125,21 +149,29 @@ async fn main() -> anyhow::Result<()> { match consumer.commit(offset).await{ Ok(_) => println!("Commit offset manually successfully."), Err(err) => { - eprintln!("Failed to commit offset manually, dsn: {}; ErrMessage: {}", dsn, err); + eprintln!("Failed to commit offset manually, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, err); return Err(err.into()); } } Ok(()) }) .await.map_err(|e| { - eprintln!("Failed to execute consumer functions. ErrMessage: {:?}", e); + eprintln!("Failed to poll data, topic: {}, groupId: {}, clientId: {}, ErrMessage: {:?}", topic, group_id, client_id, e); e })?; // ANCHOR_END: consumer_commit_manually // ANCHOR: seek_offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + let error_message = format!("Failed to get assignments. topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); + } + }; println!("assignments: {:?}", assignments); // seek offset @@ -152,7 +184,7 @@ async fn main() -> anyhow::Result<()> { let begin = assignment.begin(); let end = assignment.end(); println!( - "topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}", + "topic: {}, vgroup_id: {}, current offset: {}, begin {}, end: {}", topic, vgroup_id, current, @@ -163,7 +195,8 @@ async fn main() -> anyhow::Result<()> { match consumer.offset_seek(topic, vgroup_id, begin).await{ Ok(_) => (), Err(err) => { - eprintln!("seek example failed; ErrMessage: {}", err); + eprintln!("Failed to seek offset, topic: {}, groupId: {}, clientId: {}, vGroupId: {}, begin: {}, ErrMessage: {:?}", + topic, group_id, client_id, vgroup_id, begin, err); return Err(err.into()); } } @@ -174,7 +207,14 @@ async fn main() -> anyhow::Result<()> { } println!("Assignment seek to beginning successfully."); // after seek offset - let assignments = consumer.assignments().await.unwrap(); + let assignments = match consumer.assignments().await{ + Some(assignments) => assignments, + None => { + let error_message = format!("Failed to get assignments. 
topic: {}, groupId: {}, clientId: {}", topic, group_id, client_id); + eprintln!("{}", error_message); + return Err(anyhow::anyhow!(error_message)); + } + }; println!("After seek offset assignments: {:?}", assignments); // ANCHOR_END: seek_offset diff --git a/docs/zh/08-develop/01-connect/index.md b/docs/zh/08-develop/01-connect/index.md index 160e4cd40c..d1aeb0ed8b 100644 --- a/docs/zh/08-develop/01-connect/index.md +++ b/docs/zh/08-develop/01-connect/index.md @@ -399,7 +399,8 @@ C/C++ 语言连接器使用 `taos_connect()` 函数用于建立与 TDengine 数 -使用 REST API 方式访问 TDengine,由应用程序去建立 HTTP 连接,自己控制 HTTP 连接参数。 +通过 REST API 方式访问 TDengine 时,应用程序直接与 taosAdapter 建立 HTTP 连接,建议使用连接池来管理连接。 +使用 REST API 的参数具体可以参考:[http-请求格式](../../reference/connector/rest-api/#http-请求格式) @@ -544,7 +545,7 @@ C/C++ 语言连接器使用 `taos_connect()` 函数用于建立与 TDengine 数 使用示例如下: ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/HikariDemo.java:connection_pool}} ``` > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 @@ -555,7 +556,7 @@ C/C++ 语言连接器使用 `taos_connect()` 函数用于建立与 TDengine 数 使用示例如下: ```java -{{#include examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java:connection_pool}} +{{#include docs/examples/java/src/main/java/com/taos/example/DruidDemo.java:connection_pool}} ``` > 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。 diff --git a/docs/zh/08-develop/02-sql.md b/docs/zh/08-develop/02-sql.md index 7e42e38949..be44458c5b 100644 --- a/docs/zh/08-develop/02-sql.md +++ b/docs/zh/08-develop/02-sql.md @@ -27,7 +27,7 @@ REST API:直接调用 `taosadapter` 提供的 REST API 接口,进行数据 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcCreatDBDemo.java:create_db_and_table}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcCreatDBDemo.java:create_db_and_table}} ``` @@ -98,7 +98,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \ ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java:insert_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcInsertDataDemo.java:insert_data}} ``` **Note** @@ -170,7 +170,7 @@ curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \ ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcQueryDemo.java:query_data}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcQueryDemo.java:query_data}} ``` **Note** 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 @@ -251,7 +251,7 @@ reqId 可用于请求链路追踪,reqId 就像分布式系统中的 traceId ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcReqIdDemo.java:with_reqid}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcReqIdDemo.java:with_reqid}} ``` diff --git a/docs/zh/08-develop/04-schemaless.md b/docs/zh/08-develop/04-schemaless.md index 06dec726e9..130d012e8b 100644 --- a/docs/zh/08-develop/04-schemaless.md +++ b/docs/zh/08-develop/04-schemaless.md @@ -165,7 +165,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessWsTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessWsTest.java:schemaless}} ``` @@ -216,7 +216,7 @@ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO ```java -{{#include 
examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/SchemalessJniTest.java:schemaless}} +{{#include docs/examples/java/src/main/java/com/taos/example/SchemalessJniTest.java:schemaless}} ``` 执行带有 reqId 的无模式写入,最后一个参数 reqId 可用于请求链路追踪。 diff --git a/docs/zh/08-develop/05-stmt.md b/docs/zh/08-develop/05-stmt.md index 4b8dbbce9b..e659177c94 100644 --- a/docs/zh/08-develop/05-stmt.md +++ b/docs/zh/08-develop/05-stmt.md @@ -27,11 +27,11 @@ import TabItem from "@theme/TabItem"; ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/WSParameterBindingBasicDemo.java:para_bind}} ``` -这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WSParameterBindingFullDemo.java) +这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/WSParameterBindingFullDemo.java) @@ -76,10 +76,10 @@ import TabItem from "@theme/TabItem"; ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingBasicDemo.java:para_bind}} +{{#include docs/examples/java/src/main/java/com/taos/example/ParameterBindingBasicDemo.java:para_bind}} ``` -这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ParameterBindingFullDemo.java) +这是一个[更详细的参数绑定示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/ParameterBindingFullDemo.java) diff --git a/docs/zh/08-develop/07-tmq.md b/docs/zh/08-develop/07-tmq.md index be48c98261..7eb8628e56 100644 --- a/docs/zh/08-develop/07-tmq.md +++ b/docs/zh/08-develop/07-tmq.md @@ -109,7 +109,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:create_consumer}} ``` @@ -168,7 +168,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:create_consumer}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:create_consumer}} ``` @@ -229,7 +229,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:poll_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:poll_data_code_piece}} ``` - `subscribe` 方法的参数含义为:订阅的主题列表(即名称),支持同时订阅多个主题。 @@ -293,7 +293,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:poll_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:poll_data_code_piece}} ``` - `subscribe` 方法的参数含义为:订阅的主题列表(即名称),支持同时订阅多个主题。 @@ -376,7 +376,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_seek}} ``` 1. 使用 consumer.poll 方法轮询数据,直到获取到数据为止。 2. 
对于轮询到的第一批数据,打印第一条数据的内容,并获取当前消费者的分区分配信息。 @@ -438,7 +438,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_seek}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_seek}} ``` 1. 使用 consumer.poll 方法轮询数据,直到获取到数据为止。 2. 对于轮询到的第一批数据,打印第一条数据的内容,并获取当前消费者的分区分配信息。 @@ -513,7 +513,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:commit_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:commit_code_piece}} ``` @@ -565,7 +565,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:commit_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:commit_code_piece}} ``` @@ -621,7 +621,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} ``` @@ -644,7 +644,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 {{#include docs/examples/rust/restexample/examples/tmq.rs:unsubscribe}} ``` -**注意**:消费者取消订阅后无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 +**注意**:消费者取消订阅后已经关闭,无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 @@ -672,7 +672,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:unsubscribe_data_code_piece}} ``` @@ -694,7 +694,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请 {{#include docs/examples/rust/restexample/examples/tmq.rs:unsubscribe}} ``` -**注意**:消费者取消订阅后无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 +**注意**:消费者取消订阅后已经关闭,无法重用,如果想订阅新的 `topic`, 请重新创建消费者。 不支持 @@ -723,7 +723,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
完整代码示例 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/WsConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java:consumer_demo}} ``` **注意**:这里的 value.deserializer 配置参数值应该根据测试环境的包路径做相应的调整。 @@ -788,7 +788,7 @@ Rust 连接器创建消费者的参数为 DSN, 可以设置的参数列表请
完整代码示例 ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/ConsumerLoopFull.java:consumer_demo}} +{{#include docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java:consumer_demo}} ``` **注意**:这里的 value.deserializer 配置参数值应该根据测试环境的包路径做相应的调整。 diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx index 0dc85feb13..a752867b3f 100644 --- a/docs/zh/14-reference/05-connector/14-java.mdx +++ b/docs/zh/14-reference/05-connector/14-java.mdx @@ -60,7 +60,7 @@ REST 连接支持所有能运行 Java 的平台。 在报错后,通过 SQLException 可以获取到错误的信息和错误码: ```java -{{#include examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcBasicDemo.java:jdbc_exception}} +{{#include docs/examples/java/src/main/java/com/taos/example/JdbcBasicDemo.java:jdbc_exception}} ``` JDBC 连接器可能报错的错误码包括 4 种: @@ -141,7 +141,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 由于历史原因,TDengine中的BINARY底层不是真正的二进制数据,已不建议使用。请用VARBINARY类型代替。 GEOMETRY类型是little endian字节序的二进制数据,符合WKB规范。详细信息请参考 [数据类型](../../taos-sql/data-type/#数据类型) WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) -对于java连接器,可以使用jts库来方便的创建GEOMETRY类型对象,序列化后写入TDengine,这里有一个样例[Geometry示例](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java) +对于java连接器,可以使用jts库来方便的创建GEOMETRY类型对象,序列化后写入TDengine,这里有一个样例[Geometry示例](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java) ## 示例程序汇总 diff --git a/docs/zh/14-reference/05-connector/60-rest-api.mdx b/docs/zh/14-reference/05-connector/60-rest-api.mdx index b6d6ec3b4a..a804229d27 100644 --- a/docs/zh/14-reference/05-connector/60-rest-api.mdx +++ b/docs/zh/14-reference/05-connector/60-rest-api.mdx @@ -444,6 +444,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` 返回值: @@ -495,6 +496,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```bash curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql ``` 返回值: diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 01cb42213e..5bc6a4df27 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -270,3 +270,6 @@ TDengine 在写入数据时如果有很严重的乱序写入问题,会严重 ### 28 修改database的root密码后,Grafana监控插件TDinsight无数据展示 TDinsight插件中展示的数据是通过taosKeeper和taosAdapter服务收集并存储于TD的log库中,在root密码修改后,需要同步更新taosKeeper和taosAdapter配置文件中对应的密码信息,然后重启taosKeeper和taosAdapter服务(注:若是集群需要重启每个节点上的对应服务)。 + +### 29 遇到报错 “some vnode/qnode/mnode(s) out of service” 怎么办? 
+客户端未配置所有服务端的 FQDN 解析。比如服务端有 3 个节点,客户端只配置了 1 个节点的 FQDN 解析。FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java deleted file mode 100644 index 7e60c10bf7..0000000000 --- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcInsertDataDemo.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.taosdata.example; - -import com.taosdata.jdbc.AbstractStatement; - -import java.sql.*; -import java.util.Properties; - -public class JdbcInsertDataDemo { - private static final String host = "localhost"; - private static final String dbName = "test"; - private static final String tbName = "weather"; - private static final String user = "root"; - private static final String password = "taosdata"; - - - public static void main(String[] args) throws SQLException { - - final String jdbcUrl = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; - -// get connection - Properties properties = new Properties(); - properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); - System.out.println("get connection starting..."); -// ANCHOR: insert_data - try (Connection connection = DriverManager.getConnection(jdbcUrl, properties); - Statement stmt = connection.createStatement()) { - - // insert data, please make sure the database and table are created before - String insertQuery = "INSERT INTO " + - "power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 219, 0.31000) " + - "(NOW + 2a, 12.60000, 218, 0.33000) " + - "(NOW + 3a, 12.30000, 221, 0.31000) " + - "power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " + - "VALUES " + - "(NOW + 1a, 10.30000, 218, 0.25000) "; - int affectedRows = stmt.executeUpdate(insertQuery); - // you can check affectedRows here - System.out.println("Successfully inserted " + affectedRows + " rows to power.meters."); - } catch (SQLException ex) { - // handle any errors, please refer to the JDBC specifications for detailed exceptions info - System.out.println("Failed to insert data to power.meters, url:" + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage()); - throw ex; - } catch (Exception ex){ - System.out.println("Failed to insert data to power.meters, url:" + jdbcUrl + "; ErrMessage: " + ex.getMessage()); - throw ex; - } -// ANCHOR_END: insert_data - } -} diff --git a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java index dd96f5c632..a366efd419 100644 --- a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java +++ b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/DruidDemo.java @@ -2,35 +2,35 @@ package com.taosdata.example; import com.alibaba.druid.pool.DruidDataSource; -import javax.sql.DataSource; import java.sql.Connection; import java.sql.Statement; public class DruidDemo { -// ANCHOR: connection_pool -public static void main(String[] args) throws Exception { - String url = "jdbc:TAOS://127.0.0.1:6030/log"; + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + String url = "jdbc:TAOS://127.0.0.1:6030/log"; - DruidDataSource dataSource = new DruidDataSource(); - // jdbc properties - 
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); - dataSource.setUrl(url); - dataSource.setUsername("root"); - dataSource.setPassword("taosdata"); - // pool configurations - dataSource.setInitialSize(10); - dataSource.setMinIdle(10); - dataSource.setMaxActive(10); - dataSource.setMaxWait(30000); - dataSource.setValidationQuery("SELECT SERVER_VERSION()"); + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("SELECT SERVER_VERSION()"); - Connection connection = dataSource.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement - //query or insert - // ... + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + // query or insert + // ... - statement.close(); - connection.close(); // put back to connection pool -} -// ANCHOR_END: connection_pool + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool } diff --git a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java index 4480cbc7c4..50b20fdb0c 100644 --- a/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java +++ b/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/HikariDemo.java @@ -4,34 +4,34 @@ import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; import java.sql.Connection; -import java.sql.SQLException; import java.sql.Statement; public class HikariDemo { -// ANCHOR: connection_pool -public static void main(String[] args) throws SQLException { - HikariConfig config = new HikariConfig(); - // jdbc properties - config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); - config.setUsername("root"); - config.setPassword("taosdata"); - // connection pool configurations - config.setMinimumIdle(10); //minimum number of idle connection - config.setMaximumPoolSize(10); //maximum number of connection in the pool - config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool - config.setMaxLifetime(0); // maximum life time for each connection - config.setIdleTimeout(0); // max idle time for recycle idle connection - config.setConnectionTestQuery("SELECT SERVER_VERSION()"); //validation query + // ANCHOR: connection_pool + public static void main(String[] args) throws Exception { + HikariConfig config = new HikariConfig(); + // jdbc properties + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + // connection pool configurations + config.setMinimumIdle(10); // minimum number of idle connection + config.setMaximumPoolSize(10); // maximum number of connection in the pool + config.setConnectionTimeout(30000); // maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("SELECT SERVER_VERSION()"); // validation query - 
HikariDataSource ds = new HikariDataSource(config); //create datasource + HikariDataSource dataSource = new HikariDataSource(config); // create datasource - Connection connection = ds.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement + Connection connection = dataSource.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement - //query or insert - // ... - statement.close(); - connection.close(); // put back to connection pool -} -// ANCHOR_END: connection_pool + // query or insert + // ... + statement.close(); + connection.close(); // put back to connection pool + dataSource.close(); + } + // ANCHOR_END: connection_pool } diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8d6184cab6..b0b5a64a50 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -19,7 +19,6 @@ #include "os.h" #include "streamMsg.h" #include "streamState.h" -#include "streamMsg.h" #include "tdatablock.h" #include "tdbInt.h" #include "tmsg.h" @@ -266,14 +265,14 @@ typedef struct SStreamTaskId { } SStreamTaskId; typedef struct SCheckpointInfo { - int64_t startTs; - int64_t checkpointId; // latest checkpoint id - int64_t checkpointVer; // latest checkpoint offset in wal - int64_t checkpointTime; // latest checkpoint time - int64_t processedVer; - int64_t nextProcessVer; // current offset in WAL, not serialize it - int64_t msgVer; - int32_t consensusTransId;// consensus checkpoint id + int64_t startTs; + int64_t checkpointId; // latest checkpoint id + int64_t checkpointVer; // latest checkpoint offset in wal + int64_t checkpointTime; // latest checkpoint time + int64_t processedVer; + int64_t nextProcessVer; // current offset in WAL, not serialize it + int64_t msgVer; + int32_t consensusTransId; // consensus checkpoint id SActiveCheckpointInfo* pActiveInfo; } SCheckpointInfo; @@ -455,6 +454,7 @@ struct SStreamTask { void* pBackend; int8_t subtableWithoutMd5; char reserve[256]; + char* backendPath; }; typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*); @@ -591,9 +591,9 @@ typedef struct STaskStatusEntry { int32_t statusLastDuration; // to record the last duration of current status int64_t stage; int32_t nodeId; - SVersionRange verRange; // start/end version in WAL, only valid for source task - int64_t processedVer; // only valid for source task - double inputQUsed; // in MiB + SVersionRange verRange; // start/end version in WAL, only valid for source task + int64_t processedVer; // only valid for source task + double inputQUsed; // in MiB double inputRate; double procsThroughput; // duration between one element put into input queue and being processed. double procsTotal; // duration between one element put into input queue and being processed. 
@@ -678,9 +678,9 @@ int32_t streamTaskSendCheckRsp(const SStreamMeta* pMeta, int32_t vgId, SStreamTa int32_t streamTaskProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp); // check downstream status -void streamTaskStartMonitorCheckRsp(SStreamTask* pTask); -void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id); -void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo); +void streamTaskStartMonitorCheckRsp(SStreamTask* pTask); +void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id); +void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo); // fill-history task int32_t streamLaunchFillHistoryTask(SStreamTask* pTask); @@ -717,8 +717,8 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, SStreamTask* pTask, const char* key) bool streamTaskIsSinkTask(const SStreamTask* pTask); void streamTaskSetRemoveBackendFiles(SStreamTask* pTask); -void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); -void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); +void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask); +void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc); STaskStatusEntry streamTaskGetStatusEntry(SStreamTask* pTask); // source level @@ -815,9 +815,9 @@ void streamTaskSendRetrieveRsp(SStreamRetrieveReq* pReq, SRpcMsg* pRsp); int32_t streamProcessHeartbeatRsp(SStreamMeta* pMeta, SMStreamHbRspMsg* pRsp); int32_t streamTaskSendCheckpointsourceRsp(SStreamTask* pTask); -void streamMutexLock(TdThreadMutex *pMutex); -void streamMutexUnlock(TdThreadMutex *pMutex); -void streamMutexDestroy(TdThreadMutex *pMutex); +void streamMutexLock(TdThreadMutex* pMutex); +void streamMutexUnlock(TdThreadMutex* pMutex); +void streamMutexDestroy(TdThreadMutex* pMutex); #ifdef __cplusplus } diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 66ea5ea5c7..a5d5316d23 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -95,6 +95,7 @@ typedef struct { } SWalCkHead; #pragma pack(pop) +typedef void (*stopDnodeFn)(); typedef struct SWal { // cfg SWalCfg cfg; @@ -117,6 +118,9 @@ typedef struct SWal { SHashObj *pRefHash; // refId -> SWalRef // path char path[WAL_PATH_LEN]; + + stopDnodeFn stopDnode; + // reusable write head SWalCkHead writeHead; } SWal; @@ -152,7 +156,7 @@ typedef struct SWalReader { } SWalReader; // module initialization -int32_t walInit(); +int32_t walInit(stopDnodeFn stopDnode); void walCleanUp(); // handle open and ctl diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index 20802e33d9..48606b2ed9 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -85,7 +85,7 @@ static int32_t mndOpenWrapper(const char *path, SMnodeOpt *opt, SMnode **pMnode) } static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { int32_t code = 0; - if ((code = walInit()) != 0) { + if ((code = walInit(pInput->stopDnodeFp)) != 0) { dError("failed to init wal since %s", tstrerror(code)); return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 6bc0b5fe93..e599676cec 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -624,8 +624,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } tmsgReportStartup("vnode-tfs", "initialized"); - - if ((code = walInit()) != 0) { + if ((code = walInit(pInput->stopDnodeFp)) != 0) 
{ dError("failed to init wal since %s", tstrerror(code)); goto _OVER; } @@ -638,7 +637,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-sync", "initialized"); - if ((code = vnodeInit(tsNumOfCommitThreads)) != 0) { + if ((code = vnodeInit(tsNumOfCommitThreads, pInput->stopDnodeFp)) != 0) { dError("failed to init vnode since %s", tstrerror(code)); goto _OVER; } diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 9819c4f64e..0a75847d96 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -414,6 +414,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { .getVnodeLoadsLiteFp = dmGetVnodeLoadsLite, .getMnodeLoadsFp = dmGetMnodeLoads, .getQnodeLoadsFp = dmGetQnodeLoads, + .stopDnodeFp = dmStop, }; opt.msgCb = dmGetMsgcb(pWrapper->pDnode); diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 3b94f00bee..5be41f830d 100644 --- a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -121,6 +121,7 @@ typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo); typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo); typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo); typedef int32_t (*ProcessAlterNodeTypeFp)(EDndNodeType ntype, SRpcMsg *pMsg); +typedef void (*StopDnodeFp)(); typedef struct { int32_t dnodeId; @@ -159,6 +160,7 @@ typedef struct { GetVnodeLoadsFp getVnodeLoadsLiteFp; GetMnodeLoadsFp getMnodeLoadsFp; GetQnodeLoadsFp getQnodeLoadsFp; + StopDnodeFp stopDnodeFp; } SMgmtInputOpt; typedef struct { diff --git a/source/dnode/mnode/impl/src/mndDump.c b/source/dnode/mnode/impl/src/mndDump.c index 31e092f1a4..565e244014 100644 --- a/source/dnode/mnode/impl/src/mndDump.c +++ b/source/dnode/mnode/impl/src/mndDump.c @@ -581,7 +581,7 @@ void mndDumpSdb() { msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack tmsgSetDefault(&msgCb); - (void)walInit(); + (void)walInit(NULL); (void)syncInit(); SMnodeOpt opt = {.msgCb = msgCb}; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a35815cf4d..93397e3a8c 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -24,6 +24,7 @@ #include "mndVgroup.h" #include "osMemory.h" #include "parser.h" +#include "taoserror.h" #include "tmisce.h" #include "tname.h" @@ -879,6 +880,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { mndTransDrop(pTrans); + if (code == 0) { + code = TSDB_CODE_ACTION_IN_PROGRESS; + } + SName dbname = {0}; code = tNameFromString(&dbname, createReq.sourceDB, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); if (code) { @@ -3058,4 +3063,4 @@ _err: mDebug("create drop %d orphan tasks trans succ", numOfTasks); } return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index d01db56013..2f56aac7d6 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -49,7 +49,9 @@ typedef struct SVSnapWriter SVSnapWriter; extern const SVnodeCfg vnodeCfgDefault; -int32_t vnodeInit(int32_t nthreads); +typedef void (*StopDnodeFp)(); + +int32_t vnodeInit(int32_t nthreads, StopDnodeFp stopDnodeFp); void vnodeCleanup(); int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, int32_t diskPrimary, STfs *pTfs); bool vnodeShouldRemoveWal(SVnode *pVnode); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 
314a6abdf5..a70a04f23d 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -14,7 +14,10 @@ */ #include "tq.h" +#include "osDef.h" +#include "taoserror.h" #include "tqCommon.h" +#include "tstream.h" #include "vnd.h" // 0: not init @@ -153,7 +156,7 @@ void tqNotifyClose(STQ* pTq) { } void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { - int32_t code = 0; + int32_t code = 0; SMqPollReq req = {0}; code = tDeserializeSMqPollReq(pHandle->msg->pCont, pHandle->msg->contLen, &req); if (code < 0) { @@ -169,7 +172,7 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { } dataRsp.common.blockNum = 0; char buf[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.common.reqOffset); + (void)tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.common.reqOffset); tqInfo("tqPushEmptyDataRsp to consumer:0x%" PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId); @@ -180,15 +183,15 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { tDeleteMqDataRsp(&dataRsp); } -int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const void* pRsp, - int32_t type, int32_t vgId) { +int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const void* pRsp, int32_t type, + int32_t vgId) { int64_t sver = 0, ever = 0; walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); char buf1[TSDB_OFFSET_LEN] = {0}; char buf2[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf1, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->reqOffset); - (void) tFormatOffset(buf2, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->rspOffset); + (void)tFormatOffset(buf1, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->reqOffset); + (void)tFormatOffset(buf2, TSDB_OFFSET_LEN, &((SMqDataRspCommon*)pRsp)->rspOffset); tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId, pReq->consumerId, pReq->epoch, ((SMqDataRspCommon*)pRsp)->blockNum, buf1, buf2, pReq->reqId); @@ -200,7 +203,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t SMqVgOffset vgOffset = {0}; int32_t vgId = TD_VID(pTq->pVnode); - int32_t code = 0; + int32_t code = 0; SDecoder decoder; tDecoderInit(&decoder, (uint8_t*)msg, msgLen); if (tDecodeMqVgOffset(&decoder, &vgOffset) < 0) { @@ -233,12 +236,13 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t } // save the new offset value - if (taosHashPut(pTq->pOffset, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset))){ + if (taosHashPut(pTq->pOffset, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset))) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - if (tqMetaSaveInfo(pTq, pTq->pOffsetStore, pOffset->subKey, strlen(pOffset->subKey), msg, msgLen - sizeof(vgOffset.consumerId)) < 0) { + if (tqMetaSaveInfo(pTq, pTq->pOffsetStore, pOffset->subKey, strlen(pOffset->subKey), msg, + msgLen - sizeof(vgOffset.consumerId)) < 0) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -416,7 +420,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) { } char buf[TSDB_OFFSET_LEN] = {0}; - (void) tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset); + (void)tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset); tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64, consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId); @@ -447,7 +451,7 @@ int32_t 
tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { tDecoderClear(&decoder); STqOffset* pSavedOffset = NULL; - int32_t code = tqMetaGetOffset(pTq, vgOffset.offset.subKey, &pSavedOffset); + int32_t code = tqMetaGetOffset(pTq, vgOffset.offset.subKey, &pSavedOffset); if (code != 0) { return TSDB_CODE_TMQ_NO_COMMITTED; } @@ -479,7 +483,7 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) { } int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { - int32_t code = 0; + int32_t code = 0; SMqPollReq req = {0}; if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) { tqError("tDeserializeSMqPollReq %d failed", pMsg->contLen); @@ -505,7 +509,6 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { consumerId, vgId, req.subKey, pHandle->consumerId); taosRUnLockLatch(&pTq->lock); return TSDB_CODE_TMQ_CONSUMER_MISMATCH; - } int64_t sver = 0, ever = 0; @@ -612,8 +615,8 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { STqCheckInfo info = {0}; - int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen); - if(code != 0){ + int32_t code = tqMetaDecodeCheckInfo(&info, msg, msgLen); + if (code != 0) { return code; } @@ -650,7 +653,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg taosRLockLatch(&pTq->lock); STqHandle* pHandle = NULL; - (void)tqMetaGetHandle(pTq, req.subKey, &pHandle); //ignore return code + (void)tqMetaGetHandle(pTq, req.subKey, &pHandle); // ignore return code taosRUnLockLatch(&pTq->lock); if (pHandle == NULL) { if (req.oldConsumerId != -1) { @@ -697,7 +700,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg } } - end: +end: tDecoderClear(&dc); return ret; } @@ -705,7 +708,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); } int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessVer) { - STQ* pTq = (STQ*) pTqObj; + STQ* pTq = (STQ*)pTqObj; int32_t vgId = TD_VID(pTq->pVnode); tqDebug("s-task:0x%x start to build task", pTask->id.taskId); @@ -749,12 +752,12 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV } streamTaskResetUpstreamStageInfo(pTask); - (void) streamSetupScheduleTrigger(pTask); + (void)streamSetupScheduleTrigger(pTask); SCheckpointInfo* pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); - char* p = streamTaskGetStatus(pTask).name; + char* p = streamTaskGetStatus(pTask).name; const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus); if (pTask->info.fillHistory) { @@ -766,14 +769,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer); } else { - tqInfo( - "vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 - " nextProcessVer:%" PRId64 - " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x delaySched:%" PRId64 - " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, - pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, - (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); + 
tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + " nextProcessVer:%" PRId64 + " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x " + "delaySched:%" PRId64 " ms, inputVer:%" PRId64, + vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, + (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); ASSERT(pChkInfo->checkpointVer <= pChkInfo->nextProcessVer); } @@ -781,8 +783,7 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV return 0; } -int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { - return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); } +int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); } int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckRsp(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode)); @@ -803,13 +804,13 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask pTask->execInfo.step2Start = taosGetTimestampMs(); if (done) { - qDebug("s-task:%s scan wal(step 2) verRange:%" PRId64 "-%" PRId64 " ended, elapsed time:%.2fs", id, pStep2Range->minVer, - pStep2Range->maxVer, 0.0); + qDebug("s-task:%s scan wal(step 2) verRange:%" PRId64 "-%" PRId64 " ended, elapsed time:%.2fs", id, + pStep2Range->minVer, pStep2Range->maxVer, 0.0); int32_t code = streamTaskPutTranstateIntoInputQ(pTask); // todo: msg lost. if (code) { qError("s-task:%s failed put trans-state into inputQ, code:%s", id, tstrerror(code)); } - (void) streamExecTask(pTask); // exec directly + (void)streamExecTask(pTask); // exec directly } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64 @@ -830,12 +831,12 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer, pStep2Range->maxVer, TASK_SCHED_STATUS__INACTIVE); - (void) streamTaskSetSchedStatusInactive(pTask); + (void)streamTaskSetSchedStatusInactive(pTask); // now the fill-history task starts to scan data from wal files. 
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE); if (code == TSDB_CODE_SUCCESS) { - (void) tqScanWalAsync(pTq, false); + (void)tqScanWalAsync(pTq, false); } } } @@ -846,9 +847,9 @@ int32_t handleStep2Async(SStreamTask* pStreamTask, void* param) { SStreamMeta* pMeta = pStreamTask->pMeta; STaskId hId = pStreamTask->hTaskInfo.id; SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId, &pTask); + int32_t code = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId, &pTask); if (pTask == NULL) { - tqWarn("s-task:0x%x failed to acquired it to exec step 2, scan wal quit", (int32_t) hId.taskId); + tqWarn("s-task:0x%x failed to acquired it to exec step 2, scan wal quit", (int32_t)hId.taskId); return TSDB_CODE_SUCCESS; } @@ -930,8 +931,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (retInfo.ret == TASK_SCANHISTORY_REXEC) { streamExecScanHistoryInFuture(pTask, retInfo.idleTime); } else { - SStreamTaskState p = streamTaskGetStatus(pTask); - ETaskStatus s = p.state; + SStreamTaskState p = streamTaskGetStatus(pTask); + ETaskStatus s = p.state; if (s == TASK_STATUS__PAUSE) { tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr, @@ -963,7 +964,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { atomic_store_32(&pTask->status.inScanHistorySentinel, 0); streamMetaReleaseTask(pMeta, pTask); - return code; // todo: handle failure + return code; // todo: handle failure } ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE); @@ -988,7 +989,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { // let's continue scan data in the wal files if (code == 0 && (pReq->reqType >= 0 || pReq->reqType == STREAM_EXEC_T_RESUME_TASK)) { - (void) tqScanWalAsync(pTq, false); // it's ok to failed + (void)tqScanWalAsync(pTq, false); // it's ok to failed } return code; @@ -1026,11 +1027,9 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessRetrieveReq(pTq->pStreamMeta, pMsg); } -int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { - return 0; -} +int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { return 0; } -int32_t tqStreamProgressRetrieveReq(STQ *pTq, SRpcMsg *pMsg) { +int32_t tqStreamProgressRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { char* msgStr = pMsg->pCont; char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1092,18 +1091,18 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code)); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode } tDecoderClear(&decoder); if (!vnodeIsRoleLeader(pTq->pVnode)) { tqDebug("vgId:%d not leader, ignore checkpoint-source msg, s-task:0x%x", vgId, req.taskId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always 
return success to mnode, todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, todo: handle failure of build and send msg to mnode } if (!pTq->pVnode->restored) { @@ -1111,9 +1110,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) ", transId:%d s-task:0x%x ignore it", vgId, req.checkpointId, req.transId, req.taskId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // always return success to mnode, , todo: handle failure of build and send msg to mnode + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // always return success to mnode, , todo: handle failure of build and send msg to mnode } SStreamTask* pTask = NULL; @@ -1123,7 +1122,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) " transId:%d it may have been destroyed", vgId, req.taskId, req.checkpointId, req.transId); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; } @@ -1136,9 +1135,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); - tmsgSendRsp(&rsp); // error occurs - return TSDB_CODE_SUCCESS; // todo retry handle error + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + tmsgSendRsp(&rsp); // error occurs + return TSDB_CODE_SUCCESS; // todo retry handle error } // todo save the checkpoint failed info @@ -1154,14 +1153,14 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; } } else { if (status != TASK_STATUS__HALT) { tqError("s-task:%s should in halt status, let's halt it directly", pTask->id.idStr); -// streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT); + // streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT); } } @@ -1178,16 +1177,17 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; - } else { // checkpoint already finished, and not in checkpoint status + } else { // checkpoint already finished, and not in checkpoint status if (req.checkpointId <= pTask->chkInfo.checkpointId) { tqWarn("s-task:%s repeatly recv checkpoint-source msg checkpointId:%" PRId64 - " transId:%d already handled, return success", pTask->id.idStr, req.checkpointId, req.transId); + " transId:%d already handled, return success", + pTask->id.idStr, req.checkpointId, req.transId); streamMutexUnlock(&pTask->lock); streamMetaReleaseTask(pMeta, pTask); SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 
TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; @@ -1198,7 +1198,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) streamMutexUnlock(&pTask->lock); if (code) { - qError("s-task:%s (vgId:%d) failed to process checkpoint-source req, code:%s", pTask->id.idStr, vgId, tstrerror(code)); + qError("s-task:%s (vgId:%d) failed to process checkpoint-source req, code:%s", pTask->id.idStr, vgId, + tstrerror(code)); return code; } @@ -1215,7 +1216,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask); if (code != TSDB_CODE_SUCCESS) { SRpcMsg rsp = {0}; - (void) streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); + (void)streamTaskBuildCheckpointSourceRsp(&req, &pMsg->info, &rsp, TSDB_CODE_SUCCESS); tmsgSendRsp(&rsp); // error occurs return TSDB_CODE_SUCCESS; } @@ -1228,7 +1229,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*) pMsg->pCont; + SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, (int32_t)pReq->downstreamTaskId); @@ -1249,7 +1250,7 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskRetrieveTriggerReq(STQ* pTq, SRpcMsg* pMsg) { int32_t vgId = TD_VID(pTq->pVnode); - SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*) pMsg->pCont; + SRetrieveChkptTriggerReq* pReq = (SRetrieveChkptTriggerReq*)pMsg->pCont; if (!vnodeIsRoleLeader(pTq->pVnode)) { tqError("vgId:%d not leader, ignore the retrieve checkpoint-trigger msg from 0x%x", vgId, (int32_t)pReq->downstreamTaskId); @@ -1264,9 +1265,7 @@ int32_t tqProcessTaskRetrieveTriggerRsp(STQ* pTq, SRpcMsg* pMsg) { } // this function is needed, do not try to remove it. 
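// (the "do not remove" note above refers to tqProcessStreamHbRsp below.)
//
// A minimal sketch, not introduced by this patch: every early-return path in
// tqProcessTaskCheckPointSourceReq above repeats the same "ack the mnode and bail out"
// sequence. A hypothetical helper along these lines could hold that sequence in one place;
// the parameter types are assumed from the calls used in the handler above.
static int32_t tqAckCheckpointSourceToMnode(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pInfo) {
  SRpcMsg rsp = {0};
  (void)streamTaskBuildCheckpointSourceRsp(pReq, pInfo, &rsp, TSDB_CODE_SUCCESS);
  tmsgSendRsp(&rsp);         // best-effort send, same as in the handler above
  return TSDB_CODE_SUCCESS;  // always report success to mnode, matching the existing behavior
}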
-int32_t tqProcessStreamHbRsp(STQ* pTq, SRpcMsg* pMsg) { - return tqStreamProcessStreamHbRsp(pTq->pStreamMeta, pMsg); -} +int32_t tqProcessStreamHbRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamProcessStreamHbRsp(pTq->pStreamMeta, pMsg); } int32_t tqProcessStreamReqCheckpointRsp(STQ* pTq, SRpcMsg* pMsg) { return tqStreamProcessReqCheckpointRsp(pTq->pStreamMeta, pMsg); diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 8b7de7058c..709bfa19bc 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -18,13 +18,13 @@ static volatile int32_t VINIT = 0; -int vnodeInit(int nthreads) { +int vnodeInit(int nthreads, StopDnodeFp stopDnodeFp) { if (atomic_val_compare_exchange_32(&VINIT, 0, 1)) { return 0; } TAOS_CHECK_RETURN(vnodeAsyncOpen(nthreads)); - TAOS_CHECK_RETURN(walInit()); + TAOS_CHECK_RETURN(walInit(stopDnodeFp)); return 0; } diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index d295e868e9..48adb22927 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -856,8 +856,6 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* de extern void doDestroyExchangeOperatorInfo(void* param); -int32_t doFilterImpl(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo, - SColumnInfoData** pResCol); int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo); int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock, int32_t rows, SExecTaskInfo* pTask, STableMetaCacheInfo* pCache); diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index 5359cc0980..02932cd278 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -966,13 +966,14 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO int32_t code = TSDB_CODE_SUCCESS; __optr_fn_t nextFp = NULL; + SOperatorInfo* pOperator = NULL; SDynQueryCtrlOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SDynQueryCtrlOperatorInfo)); if (pInfo == NULL) { code = terrno; goto _error; } - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pOperator == NULL) { code = terrno; goto _error; diff --git a/source/libs/executor/src/mergejoinoperator.c b/source/libs/executor/src/mergejoinoperator.c index 2c485cdd1b..946a1d2aa5 100644 --- a/source/libs/executor/src/mergejoinoperator.c +++ b/source/libs/executor/src/mergejoinoperator.c @@ -1863,13 +1863,14 @@ int32_t createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDo bool newDownstreams = false; int32_t code = TSDB_CODE_SUCCESS; + SOperatorInfo* pOperator = NULL; SMJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SMJoinOperatorInfo)); if (pInfo == NULL) { code = terrno; goto _return; } - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pOperator == NULL) { code = terrno; goto _return; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5984f75c05..4d8680918a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -279,21 +279,20 @@ static int32_t doLoadBlockSMA(STableScanBase* 
pTableScanInfo, SSDataBlock* pBloc return code; } -static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, - int32_t rows) { - if (pTableScanInfo->pseudoSup.numOfExprs > 0) { - SExprSupp* pSup = &pTableScanInfo->pseudoSup; - - int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows, - pTaskInfo, &pTableScanInfo->metaCache); +static int32_t doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, + int32_t rows) { + int32_t code = 0; + SExprSupp* pSup = &pTableScanInfo->pseudoSup; + if (pSup->numOfExprs > 0) { + code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows, + pTaskInfo, &pTableScanInfo->metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. - if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) { - T_LONG_JMP(pTaskInfo->env, code); + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = 0; } - - // reset the error code. - terrno = 0; } + + return code; } bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) { @@ -373,10 +372,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64, GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, pBlockInfo->id.uid); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pCost->skipBlocks += 1; pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - return TSDB_CODE_SUCCESS; + return code; } else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) { pCost->loadBlockStatis += 1; loadSMA = true; // mark the operation of load sma; @@ -391,9 +390,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader); - return TSDB_CODE_SUCCESS; + return code; } else { qDebug("%s failed to load SMA, since not all columns have SMA", GET_TASKID(pTaskInfo)); *status = FUNC_DATA_REQUIRED_DATA_LOAD; @@ -469,7 +468,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca return code; } - doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows); + if (code) { + return code; + } // restore the previous value pCost->totalRows -= pBlock->info.rows; @@ -908,7 +910,8 @@ static SSDataBlock* getOneRowResultBlock(SExecTaskInfo* pTaskInfo, STableScanBas } // set tag/tbname - doSetTagColumnData(pBase, pBlock, pTaskInfo, 1); + terrno = doSetTagColumnData(pBase, pBlock, pTaskInfo, 1); + return pBlock; } @@ -1629,7 +1632,9 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU code = 
pAPI->tsdReader.tsdReaderRetrieveDataBlock(pReader, &p, NULL); QUERY_CHECK_CODE(code, lino, _end); - doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + code = doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows); + QUERY_CHECK_CODE(code, lino, _end); + pBlock->info.id.groupId = tableListGetTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid); } @@ -2758,12 +2763,16 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock // currently only the tbname pseudo column if (pInfo->numOfPseudoExpr > 0) { - int32_t tmpCode = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, + code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes, pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache); // ignore the table not exists error, since this table may have been dropped during the scan procedure. - if (tmpCode != TSDB_CODE_SUCCESS && tmpCode != TSDB_CODE_PAR_TABLE_NOT_EXIST) { + if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + code = 0; + } + + if (code) { blockDataFreeRes((SSDataBlock*)pBlock); - T_LONG_JMP(pTaskInfo->env, code); + QUERY_CHECK_CODE(code, lino, _end); } // reset the error code. diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index a4f32356c6..f81205df7a 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2668,8 +2668,8 @@ int32_t weekdayFunctionImpl(SScalarParam *pInput, int32_t inputNum, SScalarParam } struct STm tm; TAOS_CHECK_RETURN(taosTs2Tm(timeVal, timePrec, &tm)); - int32_t ret = startFromZero ? (tm.tm.tm_wday + 6) % 7 : tm.tm.tm_wday + 1; - SCL_ERR_RET(colDataSetVal(pOutput->columnData, i, (const char*)&ret, false)); + int64_t ret = startFromZero ? (tm.tm.tm_wday + 6) % 7 : tm.tm.tm_wday + 1; + colDataSetInt64(pOutput->columnData, i, &ret); } pOutput->numOfRows = pInput->numOfRows; @@ -2778,8 +2778,8 @@ int32_t weekFunctionImpl(SScalarParam *pInput, int32_t inputNum, SScalarParam *p } struct STm tm; SCL_ERR_RET(taosTs2Tm(timeVal, prec, &tm)); - int32_t ret = calculateWeekNum(tm.tm, weekMode(mode)); - SCL_ERR_RET(colDataSetVal(pOutput->columnData, i, (const char*)&ret, false)); + int64_t ret = calculateWeekNum(tm.tm, weekMode(mode)); + colDataSetInt64(pOutput->columnData, i, &ret); } pOutput->numOfRows = pInput->numOfRows; diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 5628095973..b99c9f5f65 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -14,6 +14,8 @@ */ #include "executor.h" +#include "osDir.h" +#include "osMemory.h" #include "streamInt.h" #include "streamsm.h" #include "tmisce.h" @@ -30,7 +32,7 @@ static int32_t addToTaskset(SArray* pArray, SStreamTask* pTask) { int32_t childId = taosArrayGetSize(pArray); pTask->info.selfChildId = childId; void* p = taosArrayPush(pArray, &pTask); - return (p == NULL)? TSDB_CODE_OUT_OF_MEMORY:TSDB_CODE_SUCCESS; + return (p == NULL) ? 
TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS; } static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet, bool* pUpdated) { @@ -42,7 +44,7 @@ static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEp if (!isEqual) { (*pUpdated) = true; char tmp[512] = {0}; - (void) epsetToStr(&pTask->info.epSet, tmp, tListLen(tmp)); // only for log file, ignore errors + (void)epsetToStr(&pTask->info.epSet, tmp, tListLen(tmp)); // only for log file, ignore errors epsetAssign(&pTask->info.epSet, pEpSet); stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s, old:%s", pTask->id.taskId, nodeId, buf, tmp); @@ -92,7 +94,7 @@ static SStreamUpstreamEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) { } int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, - SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** p) { + SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** p) { *p = NULL; SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask)); @@ -224,17 +226,17 @@ void tFreeStreamTask(SStreamTask* pTask) { } if (pTask->schedInfo.pDelayTimer != NULL) { - (void) taosTmrStop(pTask->schedInfo.pDelayTimer); + (void)taosTmrStop(pTask->schedInfo.pDelayTimer); pTask->schedInfo.pDelayTimer = NULL; } if (pTask->hTaskInfo.pTimer != NULL) { - (void) taosTmrStop(pTask->hTaskInfo.pTimer); + (void)taosTmrStop(pTask->hTaskInfo.pTimer); pTask->hTaskInfo.pTimer = NULL; } if (pTask->msgInfo.pRetryTmr != NULL) { - (void) taosTmrStop(pTask->msgInfo.pRetryTmr); + (void)taosTmrStop(pTask->msgInfo.pRetryTmr); pTask->msgInfo.pRetryTmr = NULL; } @@ -296,15 +298,6 @@ void tFreeStreamTask(SStreamTask* pTask) { taosArrayDestroy(pTask->outputInfo.pNodeEpsetUpdateList); pTask->outputInfo.pNodeEpsetUpdateList = NULL; - // if ((pTask->status.removeBackendFiles) && (pTask->pMeta != NULL)) { - // char* path = taosMemoryCalloc(1, strlen(pTask->pMeta->path) + 128); - // sprintf(path, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, pTask->id.idStr); - // taosRemoveDir(path); - - // stInfo("s-task:0x%x vgId:%d remove all backend files:%s", taskId, pTask->pMeta->vgId, path); - // taosMemoryFree(path); - // } - if (pTask->id.idStr != NULL) { taosMemoryFree((void*)pTask->id.idStr); } @@ -321,10 +314,21 @@ void streamFreeTaskState(SStreamTask* pTask, int8_t remove) { stDebug("s-task:0x%x start to free task state", pTask->id.taskId); streamStateClose(pTask->pState, remove); - if (remove)taskDbSetClearFileFlag(pTask->pBackend); + if (remove) taskDbSetClearFileFlag(pTask->pBackend); taskDbRemoveRef(pTask->pBackend); pTask->pBackend = NULL; pTask->pState = NULL; + } else { + if (remove) { + if (pTask->backendPath != NULL) { + taosRemoveDir(pTask->backendPath); + } + } + } + + if (pTask->backendPath != NULL) { + taosMemoryFree(pTask->backendPath); + pTask->backendPath = NULL; } } @@ -364,8 +368,36 @@ static void setInitialVersionInfo(SStreamTask* pTask, int64_t ver) { } } +int32_t streamTaskSetBackendPath(SStreamTask* pTask) { + int64_t streamId = 0; + int32_t taskId = 0; + + if (pTask->info.fillHistory) { + streamId = pTask->hTaskInfo.id.taskId; + taskId = pTask->hTaskInfo.id.taskId; + } else { + streamId = pTask->streamTaskId.taskId; + taskId = pTask->streamTaskId.taskId; + } + + char id[128] = {0}; + int32_t nBytes = sprintf(id, "0x%" PRIx64 "-0x%x", streamId, taskId); + if (nBytes < 0 || nBytes >= sizeof(id)) { + return TSDB_CODE_OUT_OF_BUFFER; + } + + int32_t len = 
strlen(pTask->pMeta->path); + pTask->backendPath = (char*)taosMemoryMalloc(len + nBytes + 2); + if (pTask->backendPath == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + (void)sprintf(pTask->backendPath, "%s%s%s", pTask->pMeta->path, TD_DIRSEP, id); + + return 0; +} int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) { - (void) createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId, &pTask->id.idStr); + (void)createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId, &pTask->id.idStr); pTask->refCnt = 1; pTask->inputq.status = TASK_INPUT_STATUS__NORMAL; @@ -459,10 +491,14 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } if (pTask->chkInfo.pActiveInfo == NULL) { - code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + code = streamTaskCreateActiveChkptInfo(&pTask->chkInfo.pActiveInfo); + if (code) { + stError("s-task:%s failed to create active checkpoint info, code:%s", pTask->id.idStr, tstrerror(code)); + return code; + } } - return code; + return streamTaskSetBackendPath(pTask); } int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask) { @@ -494,12 +530,12 @@ int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstre } void* p = taosArrayPush(pTask->upstreamInfo.pList, &pEpInfo); - return (p == NULL)? TSDB_CODE_OUT_OF_MEMORY:TSDB_CODE_SUCCESS; + return (p == NULL) ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS; } void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated) { char buf[512] = {0}; - (void) epsetToStr(pEpSet, buf, tListLen(buf)); // ignore error since it is only for log file. + (void)epsetToStr(pEpSet, buf, tListLen(buf)); // ignore error since it is only for log file. int32_t numOfUpstream = taosArrayGetSize(pTask->upstreamInfo.pList); for (int32_t i = 0; i < numOfUpstream; ++i) { @@ -510,7 +546,7 @@ void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpS *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pInfo->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pInfo->epSet, tmp, tListLen(tmp)); epsetAssign(&pInfo->epSet, pEpSet); stDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s old:%s", pTask->id.taskId, @@ -545,7 +581,7 @@ void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDo void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet, bool* pUpdated) { char buf[512] = {0}; - (void) epsetToStr(pEpSet, buf, tListLen(buf)); // ignore the error since only for log files. + (void)epsetToStr(pEpSet, buf, tListLen(buf)); // ignore the error since only for log files. 
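  // A small sketch, not part of this change (helper name hypothetical): the pattern shared by
  // doUpdateTaskEpset, streamTaskUpdateUpstreamInfo and this function, render the old epset for
  // the log, assign the new one, then log both, could be written once as:
  //   static void updateAndLogEpset(SEpSet* pDst, const SEpSet* pSrc, int32_t taskId, int32_t nodeId) {
  //     char oldEp[512] = {0};
  //     char newEp[512] = {0};
  //     (void)epsetToStr(pDst, oldEp, tListLen(oldEp));  // for logging only, errors ignored as above
  //     (void)epsetToStr(pSrc, newEp, tListLen(newEp));
  //     epsetAssign(pDst, pSrc);
  //     stDebug("s-task:0x%x (nodeId:%d) epset updated, new:%s old:%s", taskId, nodeId, newEp, oldEp);
  //   }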
int32_t id = pTask->id.taskId; int8_t type = pTask->outputInfo.type; @@ -564,7 +600,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE if (!isEqual) { *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pVgInfo->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pVgInfo->epSet, tmp, tListLen(tmp)); epsetAssign(&pVgInfo->epSet, pEpSet); stDebug("s-task:0x%x update dispatch info, task:0x%x(nodeId:%d) newEpset:%s old:%s", id, pVgInfo->taskId, @@ -584,7 +620,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE *pUpdated = true; char tmp[512] = {0}; - (void) epsetToStr(&pDispatcher->epSet, tmp, tListLen(tmp)); + (void)epsetToStr(&pDispatcher->epSet, tmp, tListLen(tmp)); epsetAssign(&pDispatcher->epSet, pEpSet); stDebug("s-task:0x%x update dispatch info, task:0x%x(nodeId:%d) newEpset:%s old:%s", id, pDispatcher->taskId, @@ -919,7 +955,7 @@ STaskStatusEntry streamTaskGetStatusEntry(SStreamTask* pTask) { static int32_t taskPauseCallback(SStreamTask* pTask, void* param) { SStreamMeta* pMeta = pTask->pMeta; - int32_t code = 0; + int32_t code = 0; int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1); stInfo("vgId:%d s-task:%s pause stream task. paused task num:%d", pMeta->vgId, pTask->id.idStr, num); @@ -935,7 +971,7 @@ static int32_t taskPauseCallback(SStreamTask* pTask, void* param) { } void streamTaskPause(SStreamTask* pTask) { - (void) streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL); + (void)streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL); } void streamTaskResume(SStreamTask* pTask) { @@ -1142,13 +1178,13 @@ void streamTaskDestroyActiveChkptInfo(SActiveCheckpointInfo* pInfo) { SStreamTmrInfo* pTriggerTmr = &pInfo->chkptTriggerMsgTmr; if (pTriggerTmr->tmrHandle != NULL) { - (void) taosTmrStop(pTriggerTmr->tmrHandle); + (void)taosTmrStop(pTriggerTmr->tmrHandle); pTriggerTmr->tmrHandle = NULL; } SStreamTmrInfo* pReadyTmr = &pInfo->chkptReadyMsgTmr; if (pReadyTmr->tmrHandle != NULL) { - (void) taosTmrStop(pReadyTmr->tmrHandle); + (void)taosTmrStop(pReadyTmr->tmrHandle); pReadyTmr->tmrHandle = NULL; } @@ -1187,4 +1223,4 @@ const char* streamTaskGetExecType(int32_t type) { default: return "invalid-exec-type"; } -} \ No newline at end of file +} diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index fd1d3e371e..5465007b18 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -788,9 +788,11 @@ int32_t syncCheckMember(int64_t rid) { } if (pSyncNode->myNodeInfo.nodeRole == TAOS_SYNC_ROLE_LEARNER) { + syncNodeRelease(pSyncNode); return TSDB_CODE_SYN_WRONG_ROLE; } + syncNodeRelease(pSyncNode); return 0; } @@ -2446,6 +2448,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { (void)taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager, &pNode->pPingTimer); } + syncNodeRelease(pNode); } static void syncNodeEqElectTimer(void* param, void* tmrId) { diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c index 9da3207471..581a63671c 100644 --- a/source/libs/wal/src/walMgmt.c +++ b/source/libs/wal/src/walMgmt.c @@ -26,6 +26,7 @@ typedef struct { uint32_t seq; int32_t refSetId; TdThread thread; + stopDnodeFn stopDnode; } SWalMgmt; static SWalMgmt tsWal = {0, .seq = 1}; @@ -35,7 +36,7 @@ static void walFreeObj(void *pWal); int64_t walGetSeq() { return (int64_t)atomic_load_32((volatile int32_t *)&tsWal.seq); } -int32_t 
walInit() { +int32_t walInit(stopDnodeFn stopDnode) { int8_t old; while (1) { old = atomic_val_compare_exchange_8(&tsWal.inited, 0, 2); @@ -57,6 +58,11 @@ int32_t walInit() { atomic_store_8(&tsWal.inited, 1); } + if (stopDnode == NULL) { + wWarn("failed to set stop dnode call back"); + } + tsWal.stopDnode = stopDnode; + return 0; } @@ -164,6 +170,8 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) { goto _err; } + pWal->stopDnode = tsWal.stopDnode; + wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->cfg.vgId, pWal, pWal->cfg.level, pWal->cfg.fsyncPeriod); return pWal; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index dc3b2df52c..9979ddd0b0 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -525,6 +525,11 @@ static int32_t walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) { if (size != sizeof(SWalIdxEntry)) { wError("vgId:%d, failed to write idx entry due to %s. ver:%" PRId64, pWal->cfg.vgId, strerror(errno), ver); + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_RETURN(TAOS_SYSTEM_ERROR(errno)); } @@ -571,6 +576,11 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy wError("vgId:%d, file:%" PRId64 ".log, failed to write since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal), strerror(errno)); + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_CHECK_GOTO(code, &lino, _exit); } @@ -627,6 +637,11 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy taosMemoryFreeClear(newBodyEncrypted); } + if (pWal->stopDnode != NULL) { + wWarn("vgId:%d, set stop dnode flag", pWal->cfg.vgId); + pWal->stopDnode(); + } + TAOS_CHECK_GOTO(code, &lino, _exit); } diff --git a/source/libs/wal/test/walMetaTest.cpp b/source/libs/wal/test/walMetaTest.cpp index c7e83e7c86..8bd4de0a89 100644 --- a/source/libs/wal/test/walMetaTest.cpp +++ b/source/libs/wal/test/walMetaTest.cpp @@ -12,7 +12,7 @@ SWalSyncInfo syncMeta = {0}; class WalCleanEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -44,7 +44,7 @@ class WalCleanEnv : public ::testing::Test { class WalCleanDeleteEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -74,7 +74,7 @@ class WalCleanDeleteEnv : public ::testing::Test { class WalKeepEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); } @@ -111,7 +111,7 @@ class WalKeepEnv : public ::testing::Test { class WalRetentionEnv : public ::testing::Test { protected: static void SetUpTestCase() { - int code = walInit(); + int code = walInit(NULL); ASSERT(code == 0); }
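// A minimal usage sketch, not part of this patch: production callers can pass a real callback
// where these tests pass NULL, and walInit() above only logs a warning when it receives NULL.
// The callback is assumed to take no arguments, matching the pWal->stopDnode() call sites in
// walWrite.c; the function below is hypothetical.
//   static void demoStopDnode() {
//     // a real implementation would ask the dnode to shut down after a fatal wal write failure
//   }
//   // e.g. in a dnode-side init path:  int32_t code = walInit(demoStopDnode);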