diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..b873e47b74 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,25 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: check-yaml + - id: check-json + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/psf/black + rev: stable + hooks: + - id: black + + - repo: https://github.com/pocc/pre-commit-hooks + rev: master + hooks: + - id: cppcheck + args: ["--error-exitcode=0"] + + - repo: https://github.com/crate-ci/typos + rev: v1.15.7 + hooks: + - id: typos + diff --git a/cmake/cmake.version b/cmake/cmake.version index edc51f206c..fe35fbe7bd 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.0.6.0.alpha") + SET(TD_VER_NUMBER "3.1.0.0.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx index 3731882fb2..4d1b67e451 100644 --- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx +++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx @@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001". INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31); ``` -`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert Multiple Rows Multiple rows can be inserted in a single SQL statement. 
The example below inser INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25); ``` -`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). ### Insert into Multiple Tables @@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31); ``` -`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). +`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert). For more details about `INSERT` please refer to [INSERT](/taos-sql/insert). diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index 578f38e73d..f5e0378a00 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -244,6 +244,8 @@ The following SQL statement creates a topic in TDengine: CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- There is an upper limit to the number of topics created, controlled by the parameter tmqMaxTopicNum, with a default of 20 + Multiple subscription types are supported. #### Subscribe to a Column @@ -265,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. 
Statements that specify Syntax: ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows: - The table schema can be modified. - Unstructured data is returned. The format of the data returned changes based on the supertable schema. -- A different table schema may exist for every data block to be processed. +- The 'with meta' parameter is optional. When selected, statements such as creating super tables and sub tables will be returned, mainly used for Taosx to perform super table migration +- The 'where_condition' parameter is optional and will be used to filter and subscribe to sub tables that meet the criteria. Where conditions cannot have ordinary columns, only tags or tbnames. Functions can be used in where conditions to filter tags, but cannot be aggregate functions because sub table tag values cannot be aggregated. It can also be a constant expression, such as 2>1 (subscribing to all child tables), Or false (subscribe to 0 sub tables) - The data returned does not include tags. ### Subscribe to a Database @@ -280,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement Syntax: ```sql -CREATE TOPIC topic_name [WITH META] AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` -This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka. +This SQL statement creates a subscription to all tables in the database. + +- The 'with meta' parameter is optional. 
When selected, it will return statements for creating all super tables and sub tables in the database, mainly used for Taosx database migration ## Create a Consumer @@ -295,7 +300,7 @@ You configure the following parameters when creating a consumer: | `td.connect.user` | string | User Name | | | `td.connect.pass` | string | Password | | | `td.connect.port` | string | Port of the server side | | -| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. | +| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. | | `client.id` | string | Client ID | Maximum length: 192. | | `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) | | `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true | diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md index 825d3c6f8b..5137e35c0a 100644 --- a/docs/en/07-develop/09-udf.md +++ b/docs/en/07-develop/09-udf.md @@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f - For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions. - To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function. -There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be _start, _finish, _init, and _destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. +There are strict naming conventions for these interface functions. 
The names of the start, finish, init, and destroy interfaces must be `_start`, `_finish`, `_init`, and `_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function. ### Implementing a Scalar Function in C The implementation of a scalar function is described as follows: @@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows: def process(input: datablock) -> tuple[output_type]: ``` -Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype +Description: this function processes datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype #### Aggregate UDF Interface @@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]: # return tuple object consisted of object of type outputtype ``` -Note:process() must be implemeted, init() and destroy() must be defined too but they can do nothing. +Note:process() must be implemented, init() and destroy() must be defined too but they can do nothing. #### Aggregate Template @@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type: #return obj of type outputtype ``` -Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. +Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. 
start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`. ### Data Mapping between TDengine SQL and Python UDF @@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart #### Sample 3: UDF with n arguments -A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: +A UDF which accepts n integers, like (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py: ```python def init(): @@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s) #### Sample 4: Utilize 3rd party package -A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty. +A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firstly. ```shell pip3 install moment @@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s) #### Sample 5: Aggregate Function -An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. 
The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`. +An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate function takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`. ```python import io @@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging. 2. log() is the function for logging, it converts the input object to string and output with an end of line 3. destroy() closes the log file \ 4. start() returns the initial buffer for storing the intermediate result -5. reduce() processes each daa block and aggregates the result +5. reduce() processes each data block and aggregates the result 6. finish() converts the final buffer() to final result\ Create the UDF. 
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index b517bcb3cc..afc1581c22 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r ELAPSED(ts_primary_key [, time_unit]) ``` -**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. +**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length. **Return value type**: Double if the input value is not NULL; @@ -999,18 +999,14 @@ SAMPLE(expr, k) **Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000]. -**Return value type**: Same as the column being operated plus the associated timestamp +**Return value type**: Same as the column being operated -**Applicable data types**: Any data type except for tags of STable +**Applicable data types**: Any data type **Applicable nested query**: Inner query and Outer query **Applicable table types**: standard tables and supertables -**More explanations**: - -- This function cannot be used in expression calculation. 
- ### TAIL @@ -1055,11 +1051,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used. +**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword. **Return value type**:Same as the data type of the column being operated upon -**Applicable column types**: Any data types except for timestamp +**Applicable column types**: Any data types **Applicable table types**: table, STable diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md index b082f7b888..7f0b8c7769 100644 --- a/docs/en/12-taos-sql/12-distinguished.md +++ b/docs/en/12-taos-sql/12-distinguished.md @@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct A PARTITION BY clause is processed as follows: - The PARTITION BY clause must occur after the WHERE clause -- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. +- The PARTITION BY clause partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause. - The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. 
For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value: ```sql diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md index eb70a7664b..bd4a60b20e 100644 --- a/docs/en/12-taos-sql/24-show.md +++ b/docs/en/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ Shows information about connections to the system. SHOW CONSUMERS; ``` -Shows information about all active consumers in the system. +Shows information about all consumers in the system. ## SHOW CREATE DATABASE diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index ebd2891a9e..e8c407b125 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java. | taos-jdbcdriver version | major changes | TDengine version | | :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: | -| 3.2.1 | subscription add seek function | 3.0.5.0 or later | +| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later | +| 3.2.2 | subscription add seek function | 3.0.5.0 or later | | 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later | | 3.2.0 | This version has been deprecated | - | | 3.1.0 | JDBC REST connection supports subscription over WebSocket | - | @@ -284,9 +285,9 @@ The configuration parameters in the URL are as follows: - batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. batchfetch uses HTTP for data transfer. JDBC REST supports batch pulls. taos-jdbcdriver and TDengine transfer data via WebSocket connection. 
Compared with HTTP, WebSocket enables JDBC REST connection to support large data volume querying and improve query performance. - charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true. - batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false. -- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms. -- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false. -- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true. +- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms. +- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false. +- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true. - useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. - httpPoolSize: size of REST concurrent requests. The default value is 20. @@ -352,9 +353,9 @@ The configuration parameters in properties are as follows. - TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set. - TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale. - TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone. 
-- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection. -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. +- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection. +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false. +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true. - TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection. - TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20. For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only). diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx index 06d643c6c8..b3d4857d75 100644 --- a/docs/en/14-reference/03-connector/05-go.mdx +++ b/docs/en/14-reference/03-connector/05-go.mdx @@ -31,63 +31,78 @@ REST connections are supported on all platforms that can run Go. 
Please refer to [version support list](https://github.com/taosdata/driver-go#remind) -## Supported features +## Handling exceptions -### Native connections +If it is a TDengine error, you can get the error code and error information in the following ways. +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -A "native connection" is established by the connector directly to the TDengine instance via the TDengine client driver (taosc). The supported functional features are: +## TDengine DataType vs. Go DataType -* Normal queries -* Continuous queries -* Subscriptions -* Schemaless interface -* Parameter binding interface +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | -### REST connection - -A "REST connection" is a connection between the application and the TDengine instance via the REST API provided by the taosAdapter component. The following features are supported: - -* Normal queries -* Continuous queries +**Note**: Only TAG supports JSON types ## Installation Steps ### Pre-installation preparation * Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above) -- If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps +* If you use the native connector, please install the TDengine client driver. 
Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps Configure the environment variables and check the command. * ```go env``` * ```gcc -v``` -### Use go get to install - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### Manage with go mod +### Install the connectors 1. Initialize the project with the `go mod` command. - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. Introduce taosSql - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. Update the dependency packages with `go mod tidy`. - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. Run the program with `go run taos-demo` or compile the binary with the `go build` command. @@ -98,8 +113,6 @@ Configure the environment variables and check the command. ## Establishing a connection -### Data source name (DSN) - Data source names have a standard format, e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but no type prefix (square brackets indicate optionally): ``` text @@ -111,9 +124,7 @@ DSN in full form. ```text username:password@protocol(address)/dbname?param=value ``` -### Connecting via connector - - + _taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver. 
@@ -209,42 +220,165 @@ func main() { +### Specify the URL and Properties to get the connection + +The Go connector does not support this feature + +### Priority of configuration parameters + +The Go connector does not support this feature + ## Usage examples -### Write data +### Create database and tables -#### SQL Write +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` + +### Insert data -#### InfluxDB line protocol write - - - -#### OpenTSDB Telnet line protocol write - - - -#### OpenTSDB JSON line protocol write - - - -### Query data +### Querying data -### More sample programs +### execute SQL with reqId -* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) +This reqId can be used to request link tracing. +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -## Usage limitations +### Writing data via parameter binding -Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)` otherwise it will report the error `[0x217] Database not specified or available`. 
+ + -You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error. +```go +package main -The complete example is as follows. +import ( + "time" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") + if err != nil { + panic(err) + } + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + stmt := db.InsertStmt() + err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). 
+ AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 +} +``` + + + ```go package main @@ -254,295 +388,734 @@ import ( "fmt" "time" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - if err != nil { - fmt.Println("failed to insert, err:", err) - return - } - rows, err := taos.Query("select * from tb1") - if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } + defer db.Close() + prepareEnv(db) - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + 
stmt, err := connector.Init() if err != nil { - fmt.Println("scan error:\n", err) - return + panic(err) } - fmt.Println(r.ts, r.a) + err = stmt.Prepare("insert into ? using all_json tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + + } +} + +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} + +``` + + + + + +### Schemaless Writing + + + + +```go +import ( + "fmt" + + "github.com/taosdata/driver-go/v3/af" +) + +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) 
+ } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` + + + + +```go +import ( + "database/sql" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) + +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = 
s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) } } ``` + + + + +### Schemaless with reqId + +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` + +You can get the unique id by `common.GetReqID()`. + +### Data Subscription + +The TDengine Go Connector supports subscription functionality with the following application API. + +#### Create a Topic + +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` + +#### Create a Consumer + +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` + +#### Subscribe to consume data + +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` + +#### Assignment subscription Offset + +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + 
fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` + +#### Close subscriptions + +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` + +#### Full Sample Code + + + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; 
i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` + + + + +```go +package main + +import ( + "database/sql" + "fmt" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + 
ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} + +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` + + + + +### More sample programs + +* [sample program](https://github.com/taosdata/driver-go/tree/3.0/examples) + + ## Frequently Asked Questions 1. bind interface in database/sql crashes - REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. + REST does not support parameter binding related interface. It is recommended to use `db.Exec` and `db.Query`. 2. error `[0x217] Database not specified or available` after executing other statements with `use db` statement - The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. + The execution of SQL command in the REST interface is not contextual, so using `use db` statement will not work, see the usage restrictions section above. 3. 
use `taosSql` without error but use `taosRestful` with error `[0x217] Database not specified or available` - Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. + Because the REST interface is stateless, using the `use db` statement will not take effect. See the usage restrictions section above. 4. `readBufferSize` parameter has no significant effect after being increased - Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. + Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance. 5. `disableCompression` parameter is set to `false` when the query efficiency is reduced - When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. + When set `disableCompression` parameter to `false`, the query result will be compressed by `gzip` and then transmitted, so you have to decompress the data by `gzip` after getting it. 6. `go get` command can't get the package, or timeout to get the package - Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. 
- -## Common APIs - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - Use This API to open a DB, returning an object of type \*DB. - -:::info -This API is created successfully without checking permissions, but only when you execute a Query or Exec, and check if user/password/host/port is legal. -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` built-in method to execute non-query related SQL. - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` Built-in method to execute query statements. - -### Advanced functions (af) API - -The `af` package encapsulates TDengine advanced functions such as connection management, subscriptions, schemaless, parameter binding, etc. - -#### Connection management - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - This API creates a connection to taosd via cgo. - -* `func (conn *Connector) Close() error` - - Closes the connection. - -#### Subscribe - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). 
- -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - Write to InfluxDB line protocol. - -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - - Write OpenTDSB telnet protocol data. - -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - - Writes OpenTSDB JSON protocol data. - -#### parameter binding - -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - - Parameter bound single row insert. - -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - - Initialize the parameters. - -* `func (stmt *InsertStmt) Prepare(sql string) error` - - Parameter binding preprocessing SQL statement. - -* `func (stmt *InsertStmt) SetTableName(name string) error` - - Bind the table name parameter. - -* `func (stmt *InsertStmt) SetSubTableName(name string) error` - - Parameter binding to set the sub table name. - -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - Parameter bind multiple rows of data. - -* `func (stmt *InsertStmt) AddBatch() error` - - Add to a parameter-bound batch. - -* `func (stmt *InsertStmt) Execute() error` - - Execute a parameter binding. - -* `func (stmt *InsertStmt) GetAffectedRows() int` - - Gets the number of affected rows inserted by the parameter binding. - -* `func (stmt *InsertStmt) Close() error` - - Closes the parameter binding. - -### Subscribe via WebSocket - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - -Creates consumer group. 
- -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes a topic. - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -Note: `rebalanceCb` is reserved for compatibility purpose - -Subscribes to topics. - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - -Polling information. - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -Note: `tmq.TopicPartition` is reserved for compatibility purpose - -Commit information. - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - -Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -Note: `ignoredTimeoutMs` is reserved for compatibility purpose - -Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required). - -* `func (c *Consumer) Unsubscribe() error` - -Unsubscribe. - -* `func (c *Consumer) Close() error` - -Close consumer. - -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) - -### parameter binding via WebSocket - -* `func NewConnector(config *Config) (*Connector, error)` - - Create a connection. - -* `func (c *Connector) Init() (*Stmt, error)` - - Initialize the parameters. - -* `func (c *Connector) Close() error` - - Close the connection. - -* `func (s *Stmt) Prepare(sql string) error` - - Parameter binding preprocessing SQL statement. - -* `func (s *Stmt) SetTableName(name string) error` - - Bind the table name parameter. - -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` - - Set tags. - -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - Parameter bind multiple rows of data. - -* `func (s *Stmt) AddBatch() error` - - Add to a parameter-bound batch. 
- -* `func (s *Stmt) Exec() error` - - Execute a parameter binding. - -* `func (s *Stmt) GetAffectedRows() int` - - Gets the number of affected rows inserted by the parameter binding. - -* `func (s *Stmt) Close() error` - - Closes the parameter binding. - -For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) + Set Go proxy `go env -w GOPROXY=https://goproxy.cn,direct`. ## API Reference diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx index 344bd3590e..986b5cd104 100644 --- a/docs/en/14-reference/03-connector/06-rust.mdx +++ b/docs/en/14-reference/03-connector/06-rust.mdx @@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Go. | connector-rust version | TDengine version | major features | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | +| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. | | v0.8.0 | 3.0.4.0 | Support schemaless insert. | | v0.7.6 | 3.0.3.0 | Support req_id in query. | | v0.6.0 | 3.0.0.0 | Base features. | The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues. -## Installation +## Handling exceptions + +After the error is reported, the specific information of the error can be obtained: + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType vs. 
Rust DataType + +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Rust is as follows: + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +Note: Only TAG supports JSON types + +## Installation Steps ### Pre-installation preparation * Install the Rust development toolchain * If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver) -### Add taos dependency +### Install the connectors Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows: @@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` After the connection is established, you can perform operations on your database. 
@@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se ## Usage examples -### Write data +### Create database and tables -#### SQL Write +```rust +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set. + +### Insert data -#### STMT Write - - - -#### Schemaless Write - - - ### Query data -## API Reference +### execute SQL with req_id -### Connector Constructor - -You create a connector constructor by using a DSN. +This req_id can be used to request link tracing. ```rust -let cfg = TaosBuilder::default().build()?; +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; ``` -You use the builder object to create multiple connections. +### Writing data via parameter binding + +TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases. 
+ +Parameter binding details see [API Reference](#stmt-api) + + + +### Schemaless Writing + +TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless). + + + +### Schemaless with req_id + +This req_id can be used to request link tracing. ```rust -let conn: Taos = cfg.build(); +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? ``` -### Connection pooling +### Data Subscription + +TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). + +#### Create a Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### Create a Consumer + +You create a TMQ connector by using a DSN. + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +Create a consumer: + +```rust +let mut consumer = tmq.build()?; +``` + +#### Subscribe to consume data + +A single consumer can subscribe to one or more topics. + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. + let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? 
{ + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +Get assignments: + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### Assignment subscription Offset + +Seek offset: + +Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### Close subscriptions + +```rust +consumer.unsubscribe().await; +``` + +The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. + +- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. +- `client.id`: Subscriber client ID. +- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. +- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. +- `auto.commit.interval.ms`: Interval for automatic commits. + +#### Full Sample Code + +For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). + +### Use with connection pool In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2]. @@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos]. 
let taos = pool.get()?; ``` -### Connectors +### More sample programs + +The source code of the sample application is under `TDengine/examples/rust` : + +[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## Frequently Asked Questions + +For additional troubleshooting, see [FAQ](../../../train-faq/faq). + +## API Reference The [Taos][struct.Taos] object provides an API to perform operations on multiple databases. @@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required. - `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement. - `.use_database(database: &str)`: Executes the `USE` statement. -In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage. +In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage. -### Bind Interface +

+ +Bind Interface + +

Similar to the C interface, Rust provides the bind interface's wrapping. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement. @@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; The bind object provides a set of interfaces for implementing parameter binding. -#### `.set_tbname(name)` +`.set_tbname(name)` To bind table names. @@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` Bind sub-table table names and tag values when the SQL statement uses a super table. @@ -410,7 +611,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("taos".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` Bind value types. Use the [ColumnView] structure to create and bind the required types. @@ -434,7 +635,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` Execute SQL. [Stmt] objects can be reused, re-binded, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`. @@ -449,92 +650,6 @@ stmt.execute()?; For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs). -### Subscriptions - -TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/). - -You create a TMQ connector by using a DSN. - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -Create a consumer: - -```rust -let mut consumer = tmq.build()?; -``` - -A single consumer can subscribe to one or more topics. - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed. 
- -```rust -{ - let mut stream = consumer.stream(); - - while let Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -Get assignments: - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -Seek offset: - -Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -Unsubscribe: - -```rust -consumer.unsubscribe().await; -``` - -The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory. - -- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis. -- `client.id`: Subscriber client ID. -- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group. -- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential. -- `auto.commit.interval.ms`: Interval for automatic commits. - -For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos). diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 461bdfbf16..2a6cd9ecf7 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -20,10 +20,25 @@ The source code for the Python connector is hosted on [GitHub](https://github.co - The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client. - REST connections are supported on all platforms that can run Python. +### Supported features + +- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. +- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.). + ## Version selection We recommend using the latest version of `taospy`, regardless of the version of TDengine. +|Python Connector Version|major changes| +|:-------------------:|:----:| +|2.7.9|support for getting assignment and seek function on subscription| +|2.7.8|add `execute_many` method| + +|Python Websocket Connector Version|major changes| +|:----------------------------:|:-----:| +|0.2.5|1. support for getting assignment and seek function on subscription
2. support schemaless
3. support STMT| +|0.2.4|support `unsubscribe` on subscription| + ## Handling Exceptions There are 4 types of exception in python connector. @@ -54,10 +69,23 @@ All exceptions from the Python Connector are thrown directly. Applications shoul {{#include docs/examples/python/handle_exception.py}} ``` -## Supported features +## TDengine DataType vs. Python DataType -- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing. -- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.). +TDengine currently supports timestamp, number, character, Boolean type, and the corresponding type conversion with Python is as follows: + +|TDengine DataType|Python DataType| +|:---------------:|:-------------:| +|TIMESTAMP|datetime| +|INT|int| +|BIGINT|int| +|FLOAT|float| +|DOUBLE|int| +|SMALLINT|int| +|TINYINT|int| +|BOOL|bool| +|BINARY|str| +|NCHAR|str| +|JSON|str| ## Installation @@ -534,7 +562,7 @@ Connector support data subscription. For more information about subscroption, pl The `consumer` in the connector contains the subscription api. -#### Create Consumer +##### Create Consumer The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/). @@ -544,7 +572,7 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -#### Subscribe topics +##### Subscribe topics The `subscribe` function is used to subscribe to a list of topics. @@ -552,7 +580,7 @@ The `subscribe` function is used to subscribe to a list of topics. consumer.subscribe(['topic1', 'topic2']) ``` -#### Consume +##### Consume The `poll` function is used to consume data in tmq. 
The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data. @@ -570,7 +598,7 @@ while True: print(block.fetchall()) ``` -#### assignment +##### assignment The `assignment` function is used to get the assignment of the topic. @@ -578,7 +606,7 @@ The `assignment` function is used to get the assignment of the topic. assignments = consumer.assignment() ``` -#### Seek +##### Seek The `seek` function is used to reset the assignment of the topic. @@ -587,7 +615,7 @@ tp = TopicPartition(topic='topic1', partition=0, offset=0) consumer.seek(tp) ``` -#### After consuming data +##### After consuming data You should unsubscribe to the topics and close the consumer after consuming. @@ -596,13 +624,13 @@ consumer.unsubscribe() consumer.close() ``` -#### Tmq subscription example +##### Tmq subscription example ```python {{#include docs/examples/python/tmq_example.py}} ``` -#### assignment and seek example +##### assignment and seek example ```python {{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}} @@ -614,7 +642,7 @@ consumer.close() In addition to native connections, the connector also supports subscriptions via websockets. -#### Create Consumer +##### Create Consumer The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer). @@ -624,7 +652,7 @@ import taosws consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"}) ``` -#### subscribe topics +##### subscribe topics The `subscribe` function is used to subscribe to a list of topics. 
@@ -632,7 +660,7 @@ The `subscribe` function is used to subscribe to a list of topics. consumer.subscribe(['topic1', 'topic2']) ``` -#### Consume +##### Consume The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data. @@ -649,7 +677,7 @@ while True: print(row) ``` -#### assignment +##### assignment The `assignment` function is used to get the assignment of the topic. @@ -657,7 +685,7 @@ The `assignment` function is used to get the assignment of the topic. assignments = consumer.assignment() ``` -#### Seek +##### Seek The `seek` function is used to reset the assignment of the topic. @@ -665,7 +693,7 @@ The `seek` function is used to reset the assignment of the topic. consumer.seek(topic='topic1', partition=0, offset=0) ``` -#### After consuming data +##### After consuming data You should unsubscribe to the topics and close the consumer after consuming. @@ -674,13 +702,13 @@ consumer.unsubscribe() consumer.close() ``` -#### Subscription example +##### Subscription example ```python {{#include docs/examples/python/tmq_websocket_example.py}} ``` -#### Assignment and seek example +##### Assignment and seek example ```python {{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}} @@ -696,19 +724,19 @@ Connector support schemaless insert. 
-Simple insert +##### Simple insert ```python {{#include docs/examples/python/schemaless_insert.py}} ``` -Insert with ttl argument +##### Insert with ttl argument ```python {{#include docs/examples/python/schemaless_insert_ttl.py}} ``` -Insert with req_id argument +##### Insert with req_id argument ```python {{#include docs/examples/python/schemaless_insert_req_id.py}} @@ -718,19 +746,19 @@ Insert with req_id argument -Simple insert +##### Simple insert ```python {{#include docs/examples/python/schemaless_insert_raw.py}} ``` -Insert with ttl argument +##### Insert with ttl argument ```python {{#include docs/examples/python/schemaless_insert_raw_ttl.py}} ``` -Insert with req_id argument +##### Insert with req_id argument ```python {{#include docs/examples/python/schemaless_insert_raw_req_id.py}} @@ -746,7 +774,7 @@ The Python connector provides a parameter binding api for inserting data. Simila -#### Create Stmt +##### Create Stmt Call the `statement` method in `Connection` to create the `stmt` for parameter binding. @@ -757,7 +785,7 @@ conn = taos.connect() stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") ``` -#### parameter binding +##### parameter binding Call the `new_multi_binds` function to create the parameter list for parameter bindings. @@ -787,7 +815,7 @@ Call the `bind_param` (for a single row) method or the `bind_param_batch` (for m stmt.bind_param_batch(params) ``` -#### execute sql +##### execute sql Call `execute` method to execute sql. @@ -795,13 +823,13 @@ Call `execute` method to execute sql. stmt.execute() ``` -#### Close Stmt +##### Close Stmt ``` stmt.close() ``` -#### Example +##### Example ```python {{#include docs/examples/python/stmt_example.py}} @@ -810,7 +838,7 @@ stmt.close() -#### Create Stmt +##### Create Stmt Call the `statement` method in `Connection` to create the `stmt` for parameter binding. 
@@ -821,7 +849,7 @@ conn = taosws.connect('taosws://localhost:6041/test') stmt = conn.statement() ``` -#### Prepare sql +##### Prepare sql Call `prepare` method in stmt to prepare sql. @@ -829,7 +857,7 @@ Call `prepare` method in stmt to prepare sql. stmt.prepare("insert into t1 values (?, ?, ?, ?)") ``` -#### parameter binding +##### parameter binding Call the `bind_param` method to bind parameters. @@ -848,7 +876,7 @@ Call the `add_batch` method to add parameters to the batch. stmt.add_batch() ``` -#### execute sql +##### execute sql Call `execute` method to execute sql. @@ -856,13 +884,13 @@ Call `execute` method to execute sql. stmt.execute() ``` -#### Close Stmt +##### Close Stmt ``` stmt.close() ``` -#### Example +##### Example ```python {{#include docs/examples/python/stmt_websocket_example.py}} diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index f4d9ba8e42..a5c1553402 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index a87a1f64f8..54a8af2287 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -243,6 +243,7 @@ TDengine 使用 SQL åˆ›å»ŗäø€äøŖ topic: ```sql CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1; ``` +- topicåˆ›å»ŗäøŖę•°ęœ‰äøŠé™ļ¼Œé€ščæ‡å‚ę•° tmqMaxTopicNum ęŽ§åˆ¶ļ¼Œé»˜č®¤ 20 äøŖ TMQ ę”ÆęŒå¤šē§č®¢é˜…ē±»åž‹ļ¼š @@ -265,14 +266,15 @@ CREATE TOPIC topic_name as subquery čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS STABLE stb_name +CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition] ``` äøŽ `SELECT * from stbName` č®¢é˜…ēš„åŒŗåˆ«ę˜Æļ¼š - äøä¼šé™åˆ¶ē”Øęˆ·ēš„č”Øē»“ęž„å˜ę›“ć€‚ - 
čæ”å›žēš„ę˜Æéžē»“ęž„åŒ–ēš„ę•°ę®ļ¼ščæ”å›žę•°ę®ēš„ē»“ęž„ä¼šéšä¹‹č¶…ēŗ§č”Øēš„č”Øē»“ęž„å˜åŒ–č€Œå˜åŒ–ć€‚ -- ē”Øęˆ·åÆ¹äŗŽč¦å¤„ē†ēš„ęÆäø€äøŖę•°ę®å—éƒ½åÆčƒ½ęœ‰äøåŒēš„č”Øē»“ęž„ć€‚ +- with meta å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†čæ”å›žåˆ›å»ŗč¶…ēŗ§č”Øļ¼Œå­č”Øē­‰čÆ­å„ļ¼Œäø»č¦ē”ØäŗŽtaosxåšč¶…ēŗ§č”Øčæē§» +- where_condition å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†ē”Øę„čæ‡ę»¤ē¬¦åˆę”ä»¶ēš„å­č”Øļ¼Œč®¢é˜…čæ™äŗ›å­č”Øć€‚where ę”ä»¶é‡Œäøčƒ½ęœ‰ę™®é€šåˆ—ļ¼ŒåŖčƒ½ę˜Ætagꈖtbname,whereę”ä»¶é‡ŒåÆä»„ē”Øå‡½ę•°ļ¼Œē”Øę„čæ‡ę»¤tagļ¼Œä½†ę˜Æäøčƒ½ę˜Æčšåˆå‡½ę•°ļ¼Œå› äøŗå­č”Øtagå€¼ę— ę³•åščšåˆć€‚ä¹ŸåÆä»„ę˜Æåøøé‡č”Øč¾¾å¼ļ¼ŒęÆ”å¦‚ 2 > 1ļ¼ˆč®¢é˜…å…ØéƒØå­č”Øļ¼‰ļ¼Œęˆ–č€… falseļ¼ˆč®¢é˜…0个子蔨) - čæ”å›žę•°ę®äøåŒ…å«ę ‡ē­¾ć€‚ ### ę•°ę®åŗ“č®¢é˜… @@ -280,11 +282,13 @@ CREATE TOPIC topic_name AS STABLE stb_name čÆ­ę³•ļ¼š ```sql -CREATE TOPIC topic_name AS DATABASE db_name; +CREATE TOPIC topic_name [with meta] AS DATABASE db_name; ``` é€ščæ‡čÆ„čÆ­å„åÆåˆ›å»ŗäø€äøŖåŒ…å«ę•°ę®åŗ“ę‰€ęœ‰č”Øę•°ę®ēš„č®¢é˜… +- with meta å‚ę•°åÆé€‰ļ¼Œé€‰ę‹©ę—¶å°†čæ”å›žåˆ›å»ŗę•°ę®åŗ“é‡Œę‰€ęœ‰č¶…ēŗ§č”Øļ¼Œå­č”Øēš„čÆ­å„ļ¼Œäø»č¦ē”ØäŗŽtaosxåšę•°ę®åŗ“čæē§» + ## åˆ›å»ŗę¶ˆč“¹č€… *consumer* ę¶ˆč“¹č€…éœ€č¦é€ščæ‡äø€ē³»åˆ—é…ē½®é€‰é”¹åˆ›å»ŗļ¼ŒåŸŗē”€é…ē½®é”¹å¦‚äø‹č”Øę‰€ē¤ŗļ¼š @@ -295,7 +299,7 @@ CREATE TOPIC topic_name AS DATABASE db_name; | `td.connect.user` | string | ē”Øęˆ·å | | | `td.connect.pass` | string | 密码 | | | `td.connect.port` | integer | ęœåŠ”ē«Æēš„ē«Æå£å· | | -| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ | **必唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂 | +| `group.id` | string | ę¶ˆč“¹ē»„ IDļ¼ŒåŒäø€ę¶ˆč“¹ē»„å…±äŗ«ę¶ˆč“¹čæ›åŗ¦ |
**必唫锹**ć€‚ęœ€å¤§é•æåŗ¦ļ¼š192怂
ęÆäøŖtopicęœ€å¤šåÆå»ŗē«‹100äøŖ consumer group | | `client.id` | string | 客户端 ID | ęœ€å¤§é•æåŗ¦ļ¼š192怂 | | `auto.offset.reset` | enum | ę¶ˆč“¹ē»„č®¢é˜…ēš„åˆå§‹ä½ē½® |
`earliest`: default;ä»Žå¤“å¼€å§‹č®¢é˜…;
`latest`: ä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…;
`none`: ę²”ęœ‰ęäŗ¤ēš„ offset ę— ę³•č®¢é˜… | | `enable.auto.commit` | boolean | ę˜Æå¦åÆē”Øę¶ˆč“¹ä½ē‚¹č‡ŖåŠØęäŗ¤ļ¼Œtrue: č‡ŖåŠØęäŗ¤ļ¼Œå®¢ęˆ·ē«Æåŗ”ē”Øę— éœ€commitļ¼›falseļ¼šå®¢ęˆ·ē«Æåŗ”ē”Øéœ€č¦č‡Ŗč”Œcommit | é»˜č®¤å€¼äøŗ true | diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index ae11273a39..ff46437687 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -17,7 +17,7 @@ TDengine ę”ÆęŒé€ščæ‡ C/Python čÆ­čØ€čæ›č”Œ UDF å®šä¹‰ć€‚ęŽ„äø‹ę„ē»“åˆē¤ŗä¾‹ - čšåˆå‡½ę•°éœ€č¦å®žēŽ°čšåˆęŽ„å£å‡½ę•° aggfn_start , aggfn , aggfn_finish怂 - å¦‚ęžœéœ€č¦åˆå§‹åŒ–ļ¼Œå®žēŽ° udf_initļ¼›å¦‚ęžœéœ€č¦ęø…ē†å·„ä½œļ¼Œå®žēŽ°udf_destroy怂 -ęŽ„å£å‡½ę•°ēš„åē§°ę˜Æ UDF åē§°ļ¼Œęˆ–č€…ę˜Æ UDF åē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ_start, _finish, _init, _destroy)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfn,aggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ +ęŽ„å£å‡½ę•°ēš„åē§°ę˜Æ UDF åē§°ļ¼Œęˆ–č€…ę˜Æ UDF åē§°å’Œē‰¹å®šåŽē¼€ļ¼ˆ`_start`, `_finish`, `_init`, `_destroy`)ēš„čæžęŽ„ć€‚åˆ—č”Øäø­ēš„scalarfn,aggfn, udféœ€č¦ę›æę¢ęˆudfå‡½ę•°åć€‚ ### 用 C čÆ­čØ€å®žēŽ°ę ‡é‡å‡½ę•° ę ‡é‡å‡½ę•°å®žēŽ°ęØ”ęæå¦‚äø‹ diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx index 27b732b883..c7da2bd4f5 100644 --- a/docs/zh/08-connector/14-java.mdx +++ b/docs/zh/08-connector/14-java.mdx @@ -36,14 +36,15 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Java ēš„å¹³å°ć€‚ | taos-jdbcdriver ē‰ˆęœ¬ | äø»č¦å˜åŒ– | TDengine ē‰ˆęœ¬ | | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: | +| 3.2.3 | äæ®å¤ ResultSet åœØäø€äŗ›ęƒ…å†µę•°ę®č§£ęžå¤±č“„ | - | | 3.2.2 | ę–°å¢žåŠŸčƒ½ļ¼šę•°ę®č®¢é˜…ę”ÆęŒ seek åŠŸčƒ½ć€‚ | 3.0.5.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.1 | ę–°å¢žåŠŸčƒ½ļ¼šWebSocket čæžęŽ„ę”ÆęŒ schemaless äøŽ prepareStatement å†™å…„ć€‚å˜ę›“ļ¼šconsumer poll čæ”å›žē»“ęžœé›†äøŗ ConsumerRecordļ¼ŒåÆé€ščæ‡ value() čŽ·å–ęŒ‡å®šē»“ęžœé›†ę•°ę®ć€‚ 
| 3.0.3.0 åŠę›“é«˜ē‰ˆęœ¬ | | 3.2.0 | å­˜åœØčæžęŽ„é—®é¢˜ļ¼ŒäøęŽØčä½æē”Ø | - | | 3.1.0 | WebSocket čæžęŽ„ę”ÆęŒč®¢é˜…åŠŸčƒ½ | - | | 3.0.1 - 3.0.4 | äæ®å¤äø€äŗ›ęƒ…å†µäø‹ē»“ęžœé›†ę•°ę®č§£ęžé”™čÆÆēš„é—®é¢˜ć€‚3.0.1 在 JDK 11 ēŽÆå¢ƒē¼–čÆ‘ļ¼ŒJDK 8 ēŽÆå¢ƒäø‹å»ŗč®®ä½æē”Øå…¶ä»–ē‰ˆęœ¬ | - | | 3.0.0 | ę”ÆęŒ TDengine 3.0 | 3.0.0.0 åŠę›“é«˜ē‰ˆęœ¬ | -| 2.0.42 | 修在 WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”å›žå€¼ | - | -| 2.0.41 | 修正 REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåÆ†ē č½¬ē ę–¹å¼ | - | +| 2.0.42 | äæ®å¤ WebSocket čæžęŽ„äø­ wasNull ęŽ„å£čæ”å›žå€¼ | - | +| 2.0.41 | äæ®å¤ REST čæžęŽ„äø­ē”Øęˆ·åå’ŒåÆ†ē č½¬ē ę–¹å¼ | - | | 2.0.39 - 2.0.40 | 增加 REST čæžęŽ„/请求 超时设置 | - | | 2.0.38 | JDBC REST čæžęŽ„å¢žåŠ ę‰¹é‡ę‹‰å–åŠŸčƒ½ | - | | 2.0.37 | å¢žåŠ åÆ¹ json tag ę”ÆęŒ | - | @@ -287,9 +288,9 @@ url äø­ēš„é…ē½®å‚ę•°å¦‚äø‹ļ¼š - batchfetch: trueļ¼šåœØę‰§č”ŒęŸ„čÆ¢ę—¶ę‰¹é‡ę‹‰å–ē»“ęžœé›†ļ¼›falseļ¼šé€č”Œę‹‰å–ē»“ęžœé›†ć€‚é»˜č®¤å€¼äøŗļ¼šfalseć€‚é€č”Œę‹‰å–ē»“ęžœé›†ä½æē”Ø HTTP ę–¹å¼čæ›č”Œę•°ę®ä¼ č¾“ć€‚JDBC REST čæžęŽ„ę”ÆęŒę‰¹é‡ę‹‰å–ę•°ę®åŠŸčƒ½ć€‚taos-jdbcdriver äøŽ TDengine ä¹‹é—“é€ščæ‡ WebSocket čæžęŽ„čæ›č”Œę•°ę®ä¼ č¾“ć€‚ē›øč¾ƒäŗŽ HTTP,WebSocket åÆä»„ä½æ JDBC REST čæžęŽ„ę”ÆęŒå¤§ę•°ę®é‡ęŸ„čÆ¢ļ¼Œå¹¶ęå‡ęŸ„čÆ¢ę€§čƒ½ć€‚ - charset: å½“å¼€åÆę‰¹é‡ę‹‰å–ę•°ę®ę—¶ļ¼ŒęŒ‡å®šč§£ęžå­—ē¬¦äø²ę•°ę®ēš„å­—ē¬¦é›†ć€‚ - batchErrorIgnore:trueļ¼šåœØę‰§č”Œ Statement ēš„ executeBatch ę—¶ļ¼Œå¦‚ęžœäø­é—“ęœ‰äø€ę” SQL ę‰§č”Œå¤±č“„ļ¼Œē»§ē»­ę‰§č”Œäø‹é¢ēš„ SQL 了。falseļ¼šäøå†ę‰§č”Œå¤±č“„ SQL åŽēš„ä»»ä½•čÆ­å„ć€‚é»˜č®¤å€¼äøŗļ¼šfalse怂 -- httpConnectTimeout: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 5000怂 -- httpSocketTimeout: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ -- messageWaitTimeout: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 3000怂 ä»…åœØ batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ +- httpConnectTimeout: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 60000怂 +- httpSocketTimeout: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ batchfetch 设置为 
false ę—¶ē”Ÿę•ˆć€‚ +- messageWaitTimeout: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 60000怂 ä»…åœØ batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ - useSSL: čæžęŽ„äø­ę˜Æå¦ä½æē”Ø SSL怂 - httpPoolSize: REST å¹¶å‘čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 @@ -355,9 +356,9 @@ properties äø­ēš„é…ē½®å‚ę•°å¦‚äø‹ļ¼š - TSDBDriver.PROPERTY_KEY_CHARSETļ¼šå®¢ęˆ·ē«Æä½æē”Øēš„å­—ē¬¦é›†ļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå­—ē¬¦é›†ć€‚ - TSDBDriver.PROPERTY_KEY_LOCALEļ¼šä»…åœØä½æē”Ø JDBC åŽŸē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«ÆčÆ­čØ€ēŽÆå¢ƒļ¼Œé»˜č®¤å€¼ē³»ē»Ÿå½“å‰ locale怂 - TSDBDriver.PROPERTY_KEY_TIME_ZONEļ¼šä»…åœØä½æē”Ø JDBC åŽŸē”ŸčæžęŽ„ę—¶ē”Ÿę•ˆć€‚ å®¢ęˆ·ē«Æä½æē”Øēš„ę—¶åŒŗļ¼Œé»˜č®¤å€¼äøŗē³»ē»Ÿå½“å‰ę—¶åŒŗć€‚ -- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 5000ć€‚ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ -- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 3000怂 ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_CONNECT_TIMEOUT: čæžęŽ„č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ ms, é»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket č¶…ę—¶ę—¶é—“ļ¼Œå•ä½ msļ¼Œé»˜č®¤å€¼äøŗ 60000ć€‚ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 false ę—¶ē”Ÿę•ˆć€‚ +- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: ę¶ˆęÆč¶…ę—¶ę—¶é—“, 单位 ms, é»˜č®¤å€¼äøŗ 60000怂 ä»…åœØ REST čæžęŽ„äø” batchfetch 设置为 true ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.PROPERTY_KEY_USE_SSL: čæžęŽ„äø­ę˜Æå¦ä½æē”Ø SSLć€‚ä»…åœØ REST čæžęŽ„ę—¶ē”Ÿę•ˆć€‚ - TSDBDriver.HTTP_POOL_SIZE: REST å¹¶å‘čÆ·ę±‚å¤§å°ļ¼Œé»˜č®¤ 20怂 此外对 JDBC åŽŸē”ŸčæžęŽ„ļ¼Œé€ščæ‡ęŒ‡å®š URL 和 Properties čæ˜åÆä»„ęŒ‡å®šå…¶ä»–å‚ę•°ļ¼ŒęÆ”å¦‚ę—„åæ—ēŗ§åˆ«ć€SQL é•æåŗ¦ē­‰ć€‚ę›“å¤ščÆ¦ē»†é…ē½®čÆ·å‚č€ƒ[å®¢ęˆ·ē«Æé…ē½®](/reference/config/#ä»…å®¢ęˆ·ē«Æé€‚ē”Ø)怂 diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx index d431be35cb..90ef4d83ca 100644 --- a/docs/zh/08-connector/20-go.mdx +++ 
b/docs/zh/08-connector/20-go.mdx @@ -32,24 +32,44 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ čÆ·å‚č€ƒ[ē‰ˆęœ¬ę”ÆęŒåˆ—č”Ø](https://github.com/taosdata/driver-go#remind) -## ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ +## 处理异常 -### åŽŸē”ŸčæžęŽ„ +å¦‚ęžœę˜Æ TDengine é”™čÆÆåÆä»„é€ščæ‡ä»„äø‹ę–¹å¼čŽ·å–é”™čÆÆē å’Œé”™čÆÆäæ”ęÆć€‚ -ā€œåŽŸē”ŸčæžęŽ„ā€ęŒ‡čæžęŽ„å™Øé€ščæ‡ TDengine 客户端驱动(taoscļ¼‰ē›“ęŽ„äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„ć€‚ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š +```go +// import "github.com/taosdata/driver-go/v3/errors" + if err != nil { + tError, is := err.(*errors.TaosError) + if is { + fmt.Println("errorCode:", int(tError.Code)) + fmt.Println("errorMessage:", tError.ErrStr) + } else { + fmt.Println(err.Error()) + } + } +``` -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ -* č®¢é˜… -* schemaless ęŽ„å£ -* å‚ę•°ē»‘å®šęŽ„å£ +## TDengine DataType 和 Go DataType -### REST čæžęŽ„ +| TDengine DataType | Go Type | +|-------------------|-----------| +| TIMESTAMP | time.Time | +| TINYINT | int8 | +| SMALLINT | int16 | +| INT | int32 | +| BIGINT | int64 | +| TINYINT UNSIGNED | uint8 | +| SMALLINT UNSIGNED | uint16 | +| INT UNSIGNED | uint32 | +| BIGINT UNSIGNED | uint64 | +| FLOAT | float32 | +| DOUBLE | float64 | +| BOOL | bool | +| BINARY | string | +| NCHAR | string | +| JSON | []byte | -"REST čæžęŽ„"ęŒ‡čæžęŽ„å™Øé€ščæ‡ taosAdapter ē»„ä»¶ęä¾›ēš„ REST API äøŽ TDengine čæč”Œå®žä¾‹å»ŗē«‹ēš„čæžęŽ„ć€‚ę”ÆęŒēš„åŠŸčƒ½ē‰¹ę€§ęœ‰ļ¼š - -* ę™®é€šęŸ„čÆ¢ -* čæžē»­ęŸ„čÆ¢ +**ę³Øę„**:JSON ē±»åž‹ä»…åœØ tag äø­ę”ÆęŒć€‚ ## 安装歄骤 @@ -63,32 +83,28 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ * ```go env``` * ```gcc -v``` -### 使用 go get 安装 - -`go get -u github.com/taosdata/driver-go/v3@latest` - -### 使用 go mod 箔理 +### å®‰č£…čæžęŽ„å™Ø 1. 使用 `go mod` å‘½ä»¤åˆå§‹åŒ–é”¹ē›®ļ¼š - ```text - go mod init taos-demo - ``` + ```text + go mod init taos-demo + ``` 2. 
引兄 taosSql : - ```go - import ( - "database/sql" - _ "github.com/taosdata/driver-go/v3/taosSql" - ) - ``` + ```go + import ( + "database/sql" + _ "github.com/taosdata/driver-go/v3/taosSql" + ) + ``` 3. 使用 `go mod tidy` ę›“ę–°ä¾čµ–åŒ…ļ¼š - ```text - go mod tidy - ``` + ```text + go mod tidy + ``` 4. 使用 `go run taos-demo` čæč”ŒēØ‹åŗęˆ–ä½æē”Ø `go build` å‘½ä»¤ē¼–čÆ‘å‡ŗäŗŒčæ›åˆ¶ę–‡ä»¶ć€‚ @@ -99,8 +115,6 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ ## å»ŗē«‹čæžęŽ„ -### ę•°ę®ęŗåē§°ļ¼ˆDSN) - ę•°ę®ęŗåē§°å…·ęœ‰é€šē”Øę ¼å¼ļ¼Œä¾‹å¦‚ [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php)ļ¼Œä½†ę²”ęœ‰ē±»åž‹å‰ē¼€ļ¼ˆę–¹ę‹¬å·č”Øē¤ŗåÆé€‰ļ¼‰ļ¼š ``` text @@ -113,9 +127,7 @@ REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Go ēš„å¹³å°ć€‚ username:password@protocol(address)/dbname?param=value ``` -### ä½æē”ØčæžęŽ„å™Øčæ›č”ŒčæžęŽ„ - - + _taosSql_ é€ščæ‡ cgo å®žēŽ°äŗ† Go ēš„ `database/sql/driver` ęŽ„å£ć€‚åŖéœ€č¦å¼•å…„é©±åŠØå°±åÆä»„ä½æē”Ø [`database/sql`](https://golang.org/pkg/database/sql/) ēš„ęŽ„å£ć€‚ @@ -213,42 +225,165 @@ func main() { +### ęŒ‡å®š URL 和 Properties čŽ·å–čæžęŽ„ + +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ + +### é…ē½®å‚ę•°ēš„ä¼˜å…ˆēŗ§ + +Go čæžęŽ„å™Øäøę”ÆęŒę­¤åŠŸčƒ½ + ## 使用示例 -### å†™å…„ę•°ę® +### åˆ›å»ŗę•°ę®åŗ“å’Œč”Ø -#### SQL 写兄 +```go +var taosDSN = "root:taosdata@tcp(localhost:6030)/" +taos, err := sql.Open("taosSql", taosDSN) +if err != nil { + log.Fatalln("failed to connect TDengine, err:", err) +} +defer taos.Close() +_, err := taos.Exec("CREATE DATABASE power") +if err != nil { + log.Fatalln("failed to create database, err:", err) +} +_, err = taos.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)") +if err != nil { + log.Fatalln("failed to create stable, err:", err) +} +``` + +### ę’å…„ę•°ę® -#### InfluxDB č”Œåč®®å†™å…„ - - - -#### OpenTSDB Telnet č”Œåč®®å†™å…„ - - - -#### OpenTSDB JSON č”Œåč®®å†™å…„ - - - ### ęŸ„čÆ¢ę•°ę® -### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ 
+### ę‰§č”Œåø¦ęœ‰ reqId ēš„ SQL -* [ē¤ŗä¾‹ēØ‹åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) -* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 +ę­¤ reqId åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ -## ä½æē”Øé™åˆ¶ +```go +db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/") +if err != nil { + panic(err) +} +defer db.Close() +ctx := context.WithValue(context.Background(), common.ReqIDKey, common.GetReqID()) +_, err = db.ExecContext(ctx, "create database if not exists example_taos_sql") +if err != nil { + panic(err) +} +``` -ē”±äŗŽ REST ęŽ„å£ę— ēŠ¶ę€ę‰€ä»„ `use db` čÆ­ę³•äøä¼šē”Ÿę•ˆļ¼Œéœ€č¦å°† db åē§°ę”¾åˆ° SQL čÆ­å„äø­ļ¼Œå¦‚ļ¼š`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`å¦åˆ™å°†ęŠ„é”™`[0x217] Database not specified or available`怂 +### é€ščæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® -ä¹ŸåÆä»„å°† db åē§°ę”¾åˆ° DSN äø­ļ¼Œå°† `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`ć€‚å½“ęŒ‡å®šēš„ db äøå­˜åœØę—¶ę‰§č”Œ `create database` čÆ­å„äøä¼šęŠ„é”™ļ¼Œč€Œę‰§č”Œé’ˆåÆ¹čÆ„ db ēš„å…¶ä»–ęŸ„čÆ¢ęˆ–å†™å…„ę“ä½œä¼šęŠ„é”™ć€‚ + + -å®Œę•“ē¤ŗä¾‹å¦‚äø‹ļ¼š +```go +package main + +import ( + "time" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_stmt") + if err != nil { + panic(err) + } + _, err = db.Exec("create table if not exists example_stmt.tb1(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + stmt := db.InsertStmt() + 
err = stmt.Prepare("insert into example_stmt.tb1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + now := time.Now() + params := make([]*param.Param, 14) + params[0] = param.NewParam(2). + AddTimestamp(now, common.PrecisionMilliSecond). + AddTimestamp(now.Add(time.Second), common.PrecisionMilliSecond) + params[1] = param.NewParam(2).AddBool(true).AddNull() + params[2] = param.NewParam(2).AddTinyint(2).AddNull() + params[3] = param.NewParam(2).AddSmallint(3).AddNull() + params[4] = param.NewParam(2).AddInt(4).AddNull() + params[5] = param.NewParam(2).AddBigint(5).AddNull() + params[6] = param.NewParam(2).AddUTinyint(6).AddNull() + params[7] = param.NewParam(2).AddUSmallint(7).AddNull() + params[8] = param.NewParam(2).AddUInt(8).AddNull() + params[9] = param.NewParam(2).AddUBigint(9).AddNull() + params[10] = param.NewParam(2).AddFloat(10).AddNull() + params[11] = param.NewParam(2).AddDouble(11).AddNull() + params[12] = param.NewParam(2).AddBinary([]byte("binary")).AddNull() + params[13] = param.NewParam(2).AddNchar("nchar").AddNull() + + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(6). 
+ AddNchar(5) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Execute() + if err != nil { + panic(err) + } + err = stmt.Close() + if err != nil { + panic(err) + } + // select * from example_stmt.tb1 +} +``` + + + ```go package main @@ -258,288 +393,733 @@ import ( "fmt" "time" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/common/param" _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/stmt" ) func main() { - var taosDSN = "root:taosdata@http(localhost:6041)/test" - taos, err := sql.Open("taosRestful", taosDSN) + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") if err != nil { - fmt.Println("failed to connect TDengine, err:", err) - return - } - defer taos.Close() - taos.Exec("create database if not exists test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - if err != nil { - fmt.Println("failed to insert, err:", err) - return - } - rows, err := taos.Query("select * from tb1") - if err != nil { - fmt.Println("failed to select from table, err:", err) - return + panic(err) } + defer db.Close() + prepareEnv(db) - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) + config := stmt.NewConfig("ws://127.0.0.1:6041/rest/stmt", 0) + config.SetConnectUser("root") + config.SetConnectPass("taosdata") + config.SetConnectDB("example_ws_stmt") + config.SetMessageTimeout(common.DefaultMessageTimeout) + config.SetWriteWait(common.DefaultWriteWait) + config.SetErrorHandler(func(connector *stmt.Connector, err error) { + panic(err) + }) + config.SetCloseHandler(func() { + fmt.Println("stmt connector closed") + }) + + connector, err := stmt.NewConnector(config) + if err != nil { + panic(err) + } + now := time.Now() + { + 
stmt, err := connector.Init() if err != nil { - fmt.Println("scan error:\n", err) - return + panic(err) } - fmt.Println(r.ts, r.a) + err = stmt.Prepare("insert into ? using all_json tags(?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + if err != nil { + panic(err) + } + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + err = stmt.SetTags(param.NewParam(1).AddJson([]byte(`{"tb":1}`)), param.NewColumnType(1).AddJson(0)) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_json affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + } + { + stmt, err := connector.Init() + if err != nil { + panic(err) + } + err = stmt.Prepare("insert into ? using all_all tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)") + err = stmt.SetTableName("tb1") + if err != nil { + panic(err) + } + + err = stmt.SetTableName("tb2") + if err != nil { + panic(err) + } + err = stmt.SetTags( + param.NewParam(14). + AddTimestamp(now, 0). + AddBool(true). + AddTinyint(2). + AddSmallint(2). + AddInt(2). + AddBigint(2). + AddUTinyint(2). + AddUSmallint(2). + AddUInt(2). + AddUBigint(2). + AddFloat(2). + AddDouble(2). + AddBinary([]byte("tb2")). + AddNchar("tb2"), + param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0), + ) + if err != nil { + panic(err) + } + params := []*param.Param{ + param.NewParam(3).AddTimestamp(now, 0).AddTimestamp(now.Add(time.Second), 0).AddTimestamp(now.Add(time.Second*2), 0), + param.NewParam(3).AddBool(true).AddNull().AddBool(true), + param.NewParam(3).AddTinyint(1).AddNull().AddTinyint(1), + param.NewParam(3).AddSmallint(1).AddNull().AddSmallint(1), + param.NewParam(3).AddInt(1).AddNull().AddInt(1), + param.NewParam(3).AddBigint(1).AddNull().AddBigint(1), + param.NewParam(3).AddUTinyint(1).AddNull().AddUTinyint(1), + param.NewParam(3).AddUSmallint(1).AddNull().AddUSmallint(1), + param.NewParam(3).AddUInt(1).AddNull().AddUInt(1), + param.NewParam(3).AddUBigint(1).AddNull().AddUBigint(1), + param.NewParam(3).AddFloat(1).AddNull().AddFloat(1), + param.NewParam(3).AddDouble(1).AddNull().AddDouble(1), + param.NewParam(3).AddBinary([]byte("test_binary")).AddNull().AddBinary([]byte("test_binary")), + param.NewParam(3).AddNchar("test_nchar").AddNull().AddNchar("test_nchar"), + } + paramTypes := param.NewColumnType(14). + AddTimestamp(). + AddBool(). + AddTinyint(). + AddSmallint(). + AddInt(). + AddBigint(). + AddUTinyint(). + AddUSmallint(). + AddUInt(). + AddUBigint(). + AddFloat(). + AddDouble(). + AddBinary(0). 
+ AddNchar(0) + err = stmt.BindParam(params, paramTypes) + if err != nil { + panic(err) + } + err = stmt.AddBatch() + if err != nil { + panic(err) + } + err = stmt.Exec() + if err != nil { + panic(err) + } + affected := stmt.GetAffectedRows() + fmt.Println("all_all affected rows:", affected) + err = stmt.Close() + if err != nil { + panic(err) + } + + } +} + +func prepareEnv(db *sql.DB) { + steps := []string{ + "create database example_ws_stmt", + "create table example_ws_stmt.all_json(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(t json)", + "create table example_ws_stmt.all_all(" + + "ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")" + + "tags(" + + "tts timestamp," + + "tc1 bool," + + "tc2 tinyint," + + "tc3 smallint," + + "tc4 int," + + "tc5 bigint," + + "tc6 tinyint unsigned," + + "tc7 smallint unsigned," + + "tc8 int unsigned," + + "tc9 bigint unsigned," + + "tc10 float," + + "tc11 double," + + "tc12 binary(20)," + + "tc13 nchar(20))", + } + for _, step := range steps { + _, err := db.Exec(step) + if err != nil { + panic(err) + } + } +} + +``` + + + + +### ę— ęØ”å¼å†™å…„ + + + + +```go +import ( + "fmt" + + "github.com/taosdata/driver-go/v3/af" +) + +func main() { + conn, err := af.Open("localhost", "root", "taosdata", "", 6030) + if err != nil { + fmt.Println("fail to connect, err:", err) + } + defer conn.Close() + _, err = conn.Exec("create database if not exists example") + if err != nil { + panic(err) + } + _, err = conn.Exec("use example") + if err != nil { + panic(err) + } + 
influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + err = conn.InfluxDBInsertLines([]string{influxdbData}, "ns") + if err != nil { + panic(err) + } + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + err = conn.OpenTSDBInsertTelnetLines([]string{telnetData}) + if err != nil { + panic(err) + } + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + err = conn.OpenTSDBInsertJsonPayload(jsonData) + if err != nil { + panic(err) + } +} +``` + + + + +```go +import ( + "database/sql" + "log" + "time" + + "github.com/taosdata/driver-go/v3/common" + _ "github.com/taosdata/driver-go/v3/taosWS" + "github.com/taosdata/driver-go/v3/ws/schemaless" +) + +func main() { + db, err := sql.Open("taosWS", "root:taosdata@ws(localhost:6041)/") + if err != nil { + log.Fatal(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists schemaless_ws") + if err != nil { + log.Fatal(err) + } + s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041/rest/schemaless", 1, + schemaless.SetDb("schemaless_ws"), + schemaless.SetReadTimeout(10*time.Second), + schemaless.SetWriteTimeout(10*time.Second), + schemaless.SetUser("root"), + schemaless.SetPassword("taosdata"), + schemaless.SetErrorHandler(func(err error) { + log.Fatal(err) + }), + )) + if err != nil { + panic(err) + } + influxdbData := "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000" + telnetData := "stb0_0 1626006833 4 host=host0 interface=eth0" + jsonData := "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}" + + err = s.Insert(influxdbData, schemaless.InfluxDBLineProtocol, "ns", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = 
s.Insert(telnetData, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) + } + err = s.Insert(jsonData, schemaless.OpenTSDBJsonFormatProtocol, "ms", 0, common.GetReqID()) + if err != nil { + panic(err) } } ``` + + + +### ę‰§č”Œåø¦ęœ‰ reqId ēš„ę— ęØ”å¼å†™å…„ + +```go +func (s *Schemaless) Insert(lines string, protocol int, precision string, ttl int, reqID int64) error +``` + +åÆä»„é€ščæ‡ `common.GetReqID()` čŽ·å–å”Æäø€ id怂 + +### ę•°ę®č®¢é˜… + +TDengine Go čæžęŽ„å™Øę”ÆęŒč®¢é˜…åŠŸčƒ½ļ¼Œåŗ”ē”Ø API å¦‚äø‹ļ¼š + +#### åˆ›å»ŗ Topic + +```go + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } +``` + +#### åˆ›å»ŗ Consumer + +```go + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +```go + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +```go + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = 
consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } +``` + +#### å…³é—­č®¢é˜… + +```go + err = consumer.Close() + if err != nil { + panic(err) + } +``` + +#### å®Œę•“ē¤ŗä¾‹ + + + + +```go +package main + +import ( + "fmt" + "os" + + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" +) + +func main() { + db, err := af.Open("", "root", "taosdata", "", 0) + if err != nil { + panic(err) + } + defer db.Close() + _, err = db.Exec("create database if not exists example_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic as DATABASE example_tmq") + if err != nil { + panic(err) + } + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "group.id": "test", + "auto.offset.reset": "earliest", + "td.connect.ip": "127.0.0.1", + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "td.connect.port": "6030", + "client.id": "test_tmq_client", + "enable.auto.commit": "false", + "msg.with.table.name": "true", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_tmq_topic", nil) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + 
fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} +``` + + + + +```go +package main + +import ( + "database/sql" + "fmt" + + "github.com/taosdata/driver-go/v3/common" + tmqcommon "github.com/taosdata/driver-go/v3/common/tmq" + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/driver-go/v3/ws/tmq" +) + +func main() { + db, err := sql.Open("taosRestful", "root:taosdata@http(localhost:6041)/") + if err != nil { + panic(err) + } + defer db.Close() + prepareEnv(db) + consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{ + "ws.url": "ws://127.0.0.1:6041/rest/tmq", + "ws.message.channelLen": uint(0), + "ws.message.timeout": common.DefaultMessageTimeout, + "ws.message.writeWait": common.DefaultWriteWait, + "td.connect.user": "root", + "td.connect.pass": "taosdata", + "group.id": "example", + "client.id": "example_consumer", + "auto.offset.reset": "earliest", + }) + if err != nil { + panic(err) + } + err = consumer.Subscribe("example_ws_tmq_topic", nil) + if err != nil { + panic(err) + } + go func() { + _, err := db.Exec("create table example_ws_tmq.t_all(ts timestamp," + + "c1 bool," + + "c2 tinyint," + + "c3 smallint," + + "c4 int," + + "c5 bigint," + + "c6 tinyint unsigned," + + "c7 smallint unsigned," + + "c8 int unsigned," + + "c9 bigint unsigned," + + "c10 float," + + "c11 double," + + "c12 binary(20)," + + "c13 nchar(20)" + + ")") + if err != nil { + panic(err) + } + _, err = db.Exec("insert into example_ws_tmq.t_all values(now,true,2,3,4,5,6,7,8,9,10.123,11.123,'binary','nchar')") + if err != nil { + panic(err) + } + }() + for i := 0; i < 5; i++ { + ev := consumer.Poll(500) + if 
ev != nil { + switch e := ev.(type) { + case *tmqcommon.DataMessage: + fmt.Printf("get message:%v\n", e) + case tmqcommon.Error: + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + panic(e) + } + consumer.Commit() + } + } + partitions, err := consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + err = consumer.Seek(tmqcommon.TopicPartition{ + Topic: partitions[i].Topic, + Partition: partitions[i].Partition, + Offset: 0, + }, 0) + if err != nil { + panic(err) + } + } + + partitions, err = consumer.Assignment() + if err != nil { + panic(err) + } + for i := 0; i < len(partitions); i++ { + fmt.Println(partitions[i]) + } + + err = consumer.Close() + if err != nil { + panic(err) + } +} + +func prepareEnv(db *sql.DB) { + _, err := db.Exec("create database example_ws_tmq WAL_RETENTION_PERIOD 86400") + if err != nil { + panic(err) + } + _, err = db.Exec("create topic example_ws_tmq_topic as database example_ws_tmq") + if err != nil { + panic(err) + } +} +``` + + + + +### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ + +* [ē¤ŗä¾‹ēØ‹åŗ](https://github.com/taosdata/driver-go/tree/3.0/examples) +* [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)怂 + ## åøøč§é—®é¢˜ 1. database/sql äø­ stmtļ¼ˆå‚ę•°ē»‘å®šļ¼‰ē›øå…³ęŽ„å£å“©ęŗƒ - REST äøę”ÆęŒå‚ę•°ē»‘å®šē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 + REST äøę”ÆęŒå‚ę•°ē»‘å®šē›øå…³ęŽ„å£ļ¼Œå»ŗč®®ä½æē”Ø`db.Exec`和`db.Query`怂 2. 使用 `use db` čÆ­å„åŽę‰§č”Œå…¶ä»–čÆ­å„ęŠ„é”™ `[0x217] Database not specified or available` - 在 REST ęŽ„å£äø­ SQL čÆ­å„ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ + 在 REST ęŽ„å£äø­ SQL čÆ­å„ēš„ę‰§č”Œę— äøŠäø‹ę–‡å…³č”ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ 3. 
使用 taosSql äøęŠ„é”™ä½æē”Ø taosRestful ꊄ错 `[0x217] Database not specified or available` - å› äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ + å› äøŗ REST ęŽ„å£ę— ēŠ¶ę€ļ¼Œä½æē”Ø `use db` čÆ­å„äøä¼šē”Ÿę•ˆļ¼Œč§£å†³åŠžę³•č§äøŠę–¹ä½æē”Øé™åˆ¶ē« čŠ‚ć€‚ 4. `readBufferSize` å‚ę•°č°ƒå¤§åŽę— ę˜Žę˜¾ę•ˆęžœ - `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čÆ¢é€Ÿåŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ + `readBufferSize` č°ƒå¤§åŽä¼šå‡å°‘čŽ·å–ē»“ęžœę—¶ `syscall` ēš„č°ƒē”Øć€‚å¦‚ęžœęŸ„čÆ¢ē»“ęžœēš„ę•°ę®é‡äøå¤§ļ¼Œäæ®ę”¹čÆ„å‚ę•°äøä¼šåø¦ę„ę˜Žę˜¾ęå‡ļ¼Œå¦‚ęžœčÆ„å‚ę•°äæ®ę”¹čæ‡å¤§ļ¼Œē“¶é¢ˆä¼šåœØč§£ęž JSON ę•°ę®ć€‚å¦‚ęžœéœ€č¦ä¼˜åŒ–ęŸ„čÆ¢é€Ÿåŗ¦ļ¼Œéœ€č¦ę ¹ę®å®žé™…ęƒ…å†µč°ƒę•“čÆ„å€¼ę„č¾¾åˆ°ęŸ„čÆ¢ę•ˆęžœęœ€ä¼˜ć€‚ 5. `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ę•ˆēŽ‡é™ä½Ž - 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` åŽ‹ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ + 当 `disableCompression` å‚ę•°č®¾ē½®äøŗ `false` ę—¶ęŸ„čÆ¢ē»“ęžœä¼šä½æē”Ø `gzip` åŽ‹ē¼©åŽä¼ č¾“ļ¼Œę‹æåˆ°ę•°ę®åŽč¦å…ˆčæ›č”Œ `gzip` č§£åŽ‹ć€‚ 6. 
`go get` å‘½ä»¤ę— ę³•čŽ·å–åŒ…ļ¼Œęˆ–č€…čŽ·å–åŒ…č¶…ę—¶ 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`怂 -## 常用 API - -### database/sql API - -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - 评 API ē”Øę„ę‰“å¼€ DBļ¼Œčæ”å›žäø€äøŖē±»åž‹äøŗ \*DB ēš„åÆ¹č±”ć€‚ - -:::info -评 API ęˆåŠŸåˆ›å»ŗēš„ę—¶å€™ļ¼Œå¹¶ę²”ęœ‰åšęƒé™ē­‰ę£€ęŸ„ļ¼ŒåŖęœ‰åœØēœŸę­£ę‰§č”Œ Query ꈖ者 Exec ēš„ę—¶å€™ę‰čƒ½ēœŸę­£ēš„åŽ»åˆ›å»ŗčæžęŽ„ļ¼Œå¹¶åŒę—¶ę£€ęŸ„ user/password/host/port ę˜Æäøę˜Æåˆę³•ć€‚ -::: - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` - - `sql.Open` å†…ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒéžęŸ„čÆ¢ē›øå…³ SQL怂 - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - - `sql.Open` å†…ē½®ēš„ę–¹ę³•ļ¼Œē”Øę„ę‰§č”ŒęŸ„čÆ¢čÆ­å„ć€‚ - -### 高级功能(af)API - -`af` åŒ…å°č£…äŗ†čæžęŽ„ē®”ē†ć€č®¢é˜…ć€schemalessć€å‚ę•°ē»‘å®šē­‰ TDengine é«˜ēŗ§åŠŸčƒ½ć€‚ - -#### čæžęŽ„ē®”ē† - -* `af.Open(host, user, pass, db string, port int) (*Connector, error)` - - 评 API é€ščæ‡ cgo åˆ›å»ŗäøŽ taosd ēš„čæžęŽ„ć€‚ - -* `func (conn *Connector) Close() error` - - å…³é—­äøŽ taosd ēš„čæžęŽ„ć€‚ - -#### č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - åˆ›å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆć€‚ - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęäŗ¤ę¶ˆęÆć€‚ - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c 
*Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -#### schemaless - -* `func (conn *Connector) InfluxDBInsertLines(lines []string, precision string) error` - - 写兄 InfluxDB č”Œåč®®ć€‚ - -* `func (conn *Connector) OpenTSDBInsertTelnetLines(lines []string) error` - - 写兄 OpenTDSB telnet åč®®ę•°ę®ć€‚ - -* `func (conn *Connector) OpenTSDBInsertJsonPayload(payload string) error` - - 写兄 OpenTSDB JSON åč®®ę•°ę®ć€‚ - -#### å‚ę•°ē»‘å®š - -* `func (conn *Connector) StmtExecute(sql string, params *param.Param) (res driver.Result, err error)` - - å‚ę•°ē»‘å®šå•č”Œę’å…„ć€‚ - -* `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` - - åˆå§‹åŒ–å‚ę•°ć€‚ - -* `func (stmt *InsertStmt) Prepare(sql string) error` - - å‚ę•°ē»‘å®šé¢„å¤„ē† SQL čÆ­å„ć€‚ - -* `func (stmt *InsertStmt) SetTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ - -* `func (stmt *InsertStmt) SetSubTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®å­č”Øåć€‚ - -* `func (stmt *InsertStmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ - -* `func (stmt *InsertStmt) AddBatch() error` - - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ - -* `func (stmt *InsertStmt) Execute() error` - - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ - -* `func (stmt *InsertStmt) GetAffectedRows() int` - - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ - -* `func (stmt *InsertStmt) Close() error` - - ē»“ęŸå‚ę•°ē»‘å®šć€‚ - -### é€ščæ‡ WebSocket č®¢é˜… - -* `func NewConsumer(conf *tmq.ConfigMap) (*Consumer, error)` - - åˆ›å»ŗę¶ˆč“¹č€…ć€‚ - -* `func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…å•äøŖäø»é¢˜ć€‚ - -* `func (c *Consumer) 
SubscribeTopics(topics []string, rebalanceCb RebalanceCb) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `rebalanceCb` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - č®¢é˜…äø»é¢˜ć€‚ - -* `func (c *Consumer) Poll(timeoutMs int) tmq.Event` - - č½®čÆ¢ę¶ˆęÆć€‚ - -* `func (c *Consumer) Commit() ([]tmq.TopicPartition, error)` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `tmq.TopicPartition` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęäŗ¤ę¶ˆęÆć€‚ - -* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)` - - čŽ·å–ę¶ˆč“¹čæ›åŗ¦ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error` -ę³Øę„ļ¼šå‡ŗäŗŽå…¼å®¹ē›®ēš„äæē•™ `ignoredTimeoutMs` å‚ę•°ļ¼Œå½“å‰ęœŖä½æē”Ø - - ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ć€‚(éœ€č¦ TDengine >= 3.0.5.0, driver-go >= v3.5.0) - -* `func (c *Consumer) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go) - -### é€ščæ‡ WebSocket čæ›č”Œå‚ę•°ē»‘å®š - -* `func NewConnector(config *Config) (*Connector, error)` - - åˆ›å»ŗčæžęŽ„ć€‚ - -* `func (c *Connector) Init() (*Stmt, error)` - - åˆå§‹åŒ–å‚ę•°ć€‚ - -* `func (c *Connector) Close() error` - - å…³é—­čæžęŽ„ć€‚ - -* `func (s *Stmt) Prepare(sql string) error` - - å‚ę•°ē»‘å®šé¢„å¤„ē† SQL čÆ­å„ć€‚ - -* `func (s *Stmt) SetTableName(name string) error` - - å‚ę•°ē»‘å®šč®¾ē½®č”Øåć€‚ - -* `func (s *Stmt) SetTags(tags *param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šč®¾ē½®ę ‡ē­¾ć€‚ - -* `func (s *Stmt) BindParam(params []*param.Param, bindType *param.ColumnType) error` - - å‚ę•°ē»‘å®šå¤šč”Œę•°ę®ć€‚ - -* `func (s *Stmt) AddBatch() error` - - ę·»åŠ åˆ°å‚ę•°ē»‘å®šę‰¹å¤„ē†ć€‚ - -* `func (s *Stmt) Exec() error` - - ę‰§č”Œå‚ę•°ē»‘å®šć€‚ - -* `func (s *Stmt) GetAffectedRows() int` - - čŽ·å–å‚ę•°ē»‘å®šę’å…„å—å½±å“č”Œę•°ć€‚ - -* `func (s *Stmt) Close() error` - - ē»“ęŸå‚ę•°ē»‘å®šć€‚ - -å®Œę•“å‚ę•°ē»‘å®šē¤ŗä¾‹å‚č§ [GitHub 
示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go) - ## API å‚č€ƒ å…ØéƒØ API 见 [driver-go 文攣](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx index c23228c8cf..79a6badfea 100644 --- a/docs/zh/08-connector/26-rust.mdx +++ b/docs/zh/08-connector/26-rust.mdx @@ -30,21 +30,57 @@ Websocket čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Rust ēš„å¹³å°ć€‚ | Rust čæžęŽ„å™Øē‰ˆęœ¬ | TDengine ē‰ˆęœ¬ | 主要功能 | | :----------------: | :--------------: | :--------------------------------------------------: | -| v0.8.10 | 3.0.5.0 or later | ę¶ˆęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | +| v0.8.12 | 3.0.5.0 or later | ę¶ˆęÆč®¢é˜…ļ¼ščŽ·å–ę¶ˆč“¹čæ›åŗ¦åŠęŒ‰ē…§ęŒ‡å®ščæ›åŗ¦å¼€å§‹ę¶ˆč“¹ć€‚ | | v0.8.0 | 3.0.4.0 | ę”ÆęŒę— ęØ”å¼å†™å…„ć€‚ | | v0.7.6 | 3.0.3.0 | ę”ÆęŒåœØčÆ·ę±‚äø­ä½æē”Ø req_id怂 | | v0.6.0 | 3.0.0.0 | åŸŗē”€åŠŸčƒ½ć€‚ | Rust čæžęŽ„å™Øä»ē„¶åœØåæ«é€Ÿå¼€å‘äø­ļ¼Œ1.0 ä¹‹å‰ę— ę³•äæčÆå…¶å‘åŽå…¼å®¹ć€‚å»ŗč®®ä½æē”Ø 3.0 ē‰ˆęœ¬ä»„äøŠēš„ TDengineļ¼Œä»„éæå…å·²ēŸ„é—®é¢˜ć€‚ -## 安装 +## 处理错误 + +åœØęŠ„é”™åŽļ¼ŒåÆä»„čŽ·å–åˆ°é”™čÆÆēš„å…·ä½“äæ”ęÆļ¼š + +```rust +match conn.exec(sql) { + Ok(_) => { + Ok(()) + } + Err(e) => { + eprintln!("ERROR: {:?}", e); + Err(e) + } +} +``` + +## TDengine DataType 和 Rust DataType + +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Rust åÆ¹åŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š + +| TDengine DataType | Rust DataType | +| ----------------- | ----------------- | +| TIMESTAMP | Timestamp | +| INT | i32 | +| BIGINT | i64 | +| FLOAT | f32 | +| DOUBLE | f64 | +| SMALLINT | i16 | +| TINYINT | i8 | +| BOOL | bool | +| BINARY | Vec | +| NCHAR | String | +| JSON | serde_json::Value | + +**ę³Øę„**:JSON ē±»åž‹ä»…åœØ tag äø­ę”ÆęŒć€‚ + +## 安装歄骤 ### å®‰č£…å‰å‡†å¤‡ * 安装 Rust 开发巄具链 * å¦‚ęžœä½æē”ØåŽŸē”ŸčæžęŽ„ļ¼ŒčÆ·å®‰č£… TDengine å®¢ęˆ·ē«Æé©±åŠØļ¼Œå…·ä½“ę­„éŖ¤čÆ·å‚č€ƒ[å®‰č£…å®¢ęˆ·ē«Æé©±åŠØ](../#å®‰č£…å®¢ęˆ·ē«Æé©±åŠØ) -### 添加 taos 
ä¾čµ– +### å®‰č£…čæžęŽ„å™Ø ę ¹ę®é€‰ę‹©ēš„čæžęŽ„ę–¹å¼ļ¼ŒęŒ‰ē…§å¦‚äø‹čÆ“ę˜ŽåœØ [Rust](https://rust-lang.org) é”¹ē›®äø­ę·»åŠ  [taos][taos] ä¾čµ–ļ¼š @@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?; let conn1 = builder.build(); // use websocket protocol. -let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?; +let conn2 = builder2.build(); ``` å»ŗē«‹čæžęŽ„åŽļ¼Œę‚ØåÆä»„čæ›č”Œē›øå…³ę•°ę®åŗ“ę“ä½œļ¼š @@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> { ## 使用示例 -### å†™å…„ę•°ę® +### åˆ›å»ŗę•°ę®åŗ“å’Œč”Ø -#### SQL 写兄 +```rust +use taos::*; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let dsn = "taos://localhost:6030"; + let builder = TaosBuilder::from_dsn(dsn)?; + + let taos = builder.build()?; + + let db = "query"; + + // create database + taos.exec_many([ + format!("DROP DATABASE IF EXISTS `{db}`"), + format!("CREATE DATABASE `{db}`"), + format!("USE `{db}`"), + ]) + .await?; + + // create table + taos.exec_many([ + // create super table + "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \ + TAGS (`groupid` INT, `location` BINARY(16))", + // create child table + "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')", + ]).await?; +} +``` + +> **ę³Øę„**ļ¼šå¦‚ęžœäøä½æē”Ø `use db` ęŒ‡å®šę•°ę®åŗ“ļ¼Œåˆ™åŽē»­åÆ¹č”Øēš„ę“ä½œéƒ½éœ€č¦å¢žåŠ ę•°ę®åŗ“åē§°ä½œäøŗå‰ē¼€ļ¼Œå¦‚ db.tb怂 + +### ę’å…„ę•°ę® -#### STMT 写兄 - - - -#### Schemaless 写兄 - - - ### ęŸ„čÆ¢ę•°ę® -## API å‚č€ƒ +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ SQL -### čæžęŽ„ęž„é€ å™Ø - -é€ščæ‡ DSN ę„ęž„å»ŗäø€äøŖčæžęŽ„å™Øęž„é€ å™Øć€‚ +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ ```rust -let cfg = TaosBuilder::default().build()?; +let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?; ``` -使用 `builder` åÆ¹č±”åˆ›å»ŗå¤šäøŖčæžęŽ„ļ¼š +### é€ščæ‡å‚ę•°ē»‘å®šå†™å…„ę•°ę® + +TDengine ēš„ Rust 
čæžęŽ„å™Øå®žēŽ°äŗ†å‚ę•°ē»‘å®šę–¹å¼åÆ¹ę•°ę®å†™å…„ļ¼ˆINSERTļ¼‰åœŗę™Æēš„ę”ÆęŒć€‚é‡‡ē”Øčæ™ē§ę–¹å¼å†™å…„ę•°ę®ę—¶ļ¼Œčƒ½éæå… SQL čÆ­ę³•č§£ęžēš„čµ„ęŗę¶ˆč€—ļ¼Œä»Žč€ŒåœØå¾ˆå¤šęƒ…å†µäø‹ę˜¾č‘—ęå‡å†™å…„ę€§čƒ½ć€‚ + +å‚ę•°ē»‘å®šęŽ„å£čÆ¦č§[APIå‚č€ƒ](#stmt-api) + + + +### ę— ęØ”å¼å†™å…„ + +TDengine ę”ÆęŒę— ęØ”å¼å†™å…„åŠŸčƒ½ć€‚ę— ęØ”å¼å†™å…„å…¼å®¹ InfluxDB ēš„ č”Œåč®®ļ¼ˆLine Protocol)、OpenTSDB ēš„ telnet č”Œåč®®å’Œ OpenTSDB ēš„ JSON ę ¼å¼åč®®ć€‚čÆ¦ęƒ…čÆ·å‚č§[ę— ęØ”å¼å†™å…„](../../reference/schemaless/)怂 + + + +### ę‰§č”Œåø¦ęœ‰ req_id ēš„ę— ęØ”å¼å†™å…„ + +ę­¤ req_id åÆē”ØäŗŽčÆ·ę±‚é“¾č·Æčæ½čøŖć€‚ ```rust -let conn: Taos = cfg.build(); +let sml_data = SmlDataBuilder::default() + .protocol(SchemalessProtocol::Line) + .data(data) + .req_id(100u64) + .build()?; + +client.put(&sml_data)? ``` -### čæžęŽ„ę±  +### ę•°ę®č®¢é˜… + +TDengine é€ščæ‡ę¶ˆęÆé˜Ÿåˆ— [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ + +#### åˆ›å»ŗ Topic + +```rust +taos.exec_many([ + // create topic for subscription + format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}") +]) +.await?; +``` + +#### åˆ›å»ŗ Consumer + +从 DSN å¼€å§‹ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Øć€‚ + +```rust +let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; +``` + +åˆ›å»ŗę¶ˆč“¹č€…ļ¼š + +```rust +let mut consumer = tmq.build()?; +``` + +#### č®¢é˜…ę¶ˆč“¹ę•°ę® + +ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 + +```rust +consumer.subscribe(["tmq_meters"]).await?; +``` + +TMQ ę¶ˆęÆé˜Ÿåˆ—ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆä»„ä½æē”Øē›øåŗ” API åÆ¹ęÆäøŖę¶ˆęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ + +```rust +{ + let mut stream = consumer.stream(); + + while let Some((offset, message)) = stream.try_next().await? { + // get information from offset + + // the topic + let topic = offset.topic(); + // the vgroup id, like partition id in kafka. 
+ let vgroup_id = offset.vgroup_id(); + println!("* in vgroup id {vgroup_id} of topic {topic}\n"); + + if let Some(data) = message.into_data() { + while let Some(block) = data.fetch_raw_block().await? { + // one block for one table, get table name if needed + let name = block.table_name(); + let records: Vec = block.deserialize().try_collect()?; + println!( + "** table: {}, got {} records: {:#?}\n", + name.unwrap(), + records.len(), + records + ); + } + } + consumer.commit(offset).await?; + } +} +``` + +čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +let assignments = consumer.assignments().await.unwrap(); +``` + +#### ęŒ‡å®šč®¢é˜… Offset + +ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š + +ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 + +```rust +consumer.offset_seek(topic, vgroup_id, offset).await; +``` + +#### å…³é—­č®¢é˜… + +```rust +consumer.unsubscribe().await; +``` + +åÆ¹äŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆä»„čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øę„ēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„ć€‚ + +- `group.id`: åŒäø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ +- `client.id`: åÆé€‰ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ +- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øę„ļ¼Œę­¤é€‰é”¹åœØåŒäø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”ć€‚ +- `enable.auto.commit`: 当设置为 `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØę ‡č®°ęØ”å¼ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ę€§äøę•ę„Ÿę—¶ļ¼ŒåÆä»„åÆē”Øę­¤ę–¹å¼ć€‚ +- `auto.commit.interval.ms`: č‡ŖåŠØę ‡č®°ēš„ę—¶é—“é—“éš”ć€‚ + +#### å®Œę•“ē¤ŗä¾‹ + +å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
+ +### äøŽčæžęŽ„ę± ä½æē”Ø åœØå¤ę‚åŗ”ē”Øäø­ļ¼Œå»ŗč®®åÆē”ØčæžęŽ„ę± ć€‚[taos] ēš„čæžęŽ„ę± é»˜č®¤ļ¼ˆå¼‚ę­„ęØ”å¼ļ¼‰ä½æē”Ø [deadpool] å®žēŽ°ć€‚ @@ -295,7 +482,17 @@ let pool: Pool = Pool::builder(Manager::from_dsn(self.dsn.clone()). let taos = pool.get()?; ``` -### čæžęŽ„ +### ę›“å¤šē¤ŗä¾‹ēØ‹åŗ + +ē¤ŗä¾‹ēØ‹åŗęŗē ä½äŗŽ `TDengine/examples/rust` äø‹: + +čÆ·å‚č€ƒļ¼š[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust) + +## åøøč§é—®é¢˜ + +čÆ·å‚č€ƒ [FAQ](../../../train-faq/faq) + +## API å‚č€ƒ [Taos][struct.Taos] åÆ¹č±”ęä¾›äŗ†å¤šäøŖę•°ę®åŗ“ę“ä½œēš„ API: @@ -381,9 +578,13 @@ let taos = pool.get()?; - `.create_database(database: &str)`: ę‰§č”Œ `CREATE DATABASE` čÆ­å„ć€‚ - `.use_database(database: &str)`: ę‰§č”Œ `USE` čÆ­å„ć€‚ -é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æ [å‚ę•°ē»‘å®š](#å‚ę•°ē»‘å®šęŽ„å£) 和 [č”Œåč®®ęŽ„å£](#č”Œåč®®ęŽ„å£) ēš„å…„å£ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ +é™¤ę­¤ä¹‹å¤–ļ¼ŒčÆ„ē»“ęž„ä¹Ÿę˜Æå‚ę•°ē»‘å®šå’Œč”Œåč®®ęŽ„å£ēš„å…„å£ļ¼Œä½æē”Øę–¹ę³•čÆ·å‚č€ƒå…·ä½“ēš„ API čÆ“ę˜Žć€‚ -### å‚ę•°ē»‘å®šęŽ„å£ +

+ +å‚ę•°ē»‘å®šęŽ„å£ + +

äøŽ C ęŽ„å£ē±»ä¼¼ļ¼ŒRust ęä¾›å‚ę•°ē»‘å®šęŽ„å£ć€‚é¦–å…ˆļ¼Œé€ščæ‡ [Taos][struct.Taos] åÆ¹č±”åˆ›å»ŗäø€äøŖ SQL čÆ­å„ēš„å‚ę•°ē»‘å®šåÆ¹č±” [Stmt]: @@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?; å‚ę•°ē»‘å®šåÆ¹č±”ęä¾›äŗ†äø€ē»„ęŽ„å£ē”ØäŗŽå®žēŽ°å‚ę•°ē»‘å®šļ¼š -#### `.set_tbname(name)` +`.set_tbname(name)` ē”ØäŗŽē»‘å®šč”Øåć€‚ @@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?; stmt.set_tbname("d0")?; ``` -#### `.set_tags(&[tag])` +`.set_tags(&[tag])` 当 SQL čÆ­å„ä½æē”Øč¶…ēŗ§č”Øę—¶ļ¼Œē”ØäŗŽē»‘å®šå­č”Øč”Øåå’Œę ‡ē­¾å€¼ļ¼š @@ -413,7 +614,7 @@ stmt.set_tbname("d0")?; stmt.set_tags(&[Value::VarChar("ę¶›ę€".to_string())])?; ``` -#### `.bind(&[column])` +`.bind(&[column])` ē”ØäŗŽē»‘å®šå€¼ē±»åž‹ć€‚ä½æē”Ø [ColumnView] ē»“ęž„ä½“ęž„å»ŗéœ€č¦ēš„ē±»åž‹å¹¶ē»‘å®šļ¼š @@ -437,7 +638,7 @@ let params = vec![ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?; ``` -#### `.execute()` +`.execute()` ę‰§č”Œ SQL怂[Stmt] åÆ¹č±”åÆä»„å¤ē”Øļ¼ŒåœØę‰§č”ŒåŽåÆä»„é‡ę–°ē»‘å®šå¹¶ę‰§č”Œć€‚ę‰§č”Œå‰čÆ·ē”®äæę‰€ęœ‰ę•°ę®å·²é€ščæ‡ `.add_batch` åŠ å…„åˆ°ę‰§č”Œé˜Ÿåˆ—äø­ć€‚ @@ -452,92 +653,6 @@ stmt.execute()?; äø€äøŖåÆčæč”Œēš„ē¤ŗä¾‹čÆ·č§ [GitHub äøŠēš„ē¤ŗä¾‹](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs)怂 -### č®¢é˜… - -TDengine é€ščæ‡ę¶ˆęÆé˜Ÿåˆ— [TMQ](../../../taos-sql/tmq/) åÆåŠØäø€äøŖč®¢é˜…ć€‚ - -从 DSN å¼€å§‹ļ¼Œęž„å»ŗäø€äøŖ TMQ čæžęŽ„å™Øć€‚ - -```rust -let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?; -``` - -åˆ›å»ŗę¶ˆč“¹č€…ļ¼š - -```rust -let mut consumer = tmq.build()?; -``` - -ę¶ˆč“¹č€…åÆč®¢é˜…äø€äøŖęˆ–å¤šäøŖ `TOPIC`怂 - -```rust -consumer.subscribe(["tmq_meters"]).await?; -``` - -TMQ ę¶ˆęÆé˜Ÿåˆ—ę˜Æäø€äøŖ [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) ē±»åž‹ļ¼ŒåÆä»„ä½æē”Øē›øåŗ” API åÆ¹ęÆäøŖę¶ˆęÆčæ›č”Œę¶ˆč“¹ļ¼Œå¹¶é€ščæ‡ `.commit` čæ›č”Œå·²ę¶ˆč“¹ę ‡č®°ć€‚ - -```rust -{ - let mut stream = consumer.stream(); - - while let 
Some((offset, message)) = stream.try_next().await? { - // get information from offset - - // the topic - let topic = offset.topic(); - // the vgroup id, like partition id in kafka. - let vgroup_id = offset.vgroup_id(); - println!("* in vgroup id {vgroup_id} of topic {topic}\n"); - - if let Some(data) = message.into_data() { - while let Some(block) = data.fetch_raw_block().await? { - // one block for one table, get table name if needed - let name = block.table_name(); - let records: Vec = block.deserialize().try_collect()?; - println!( - "** table: {}, got {} records: {:#?}\n", - name.unwrap(), - records.len(), - records - ); - } - } - consumer.commit(offset).await?; - } -} -``` - -čŽ·å–ę¶ˆč“¹čæ›åŗ¦ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -let assignments = consumer.assignments().await.unwrap(); -``` - -ęŒ‰ē…§ęŒ‡å®šēš„čæ›åŗ¦ę¶ˆč“¹ļ¼š - -ē‰ˆęœ¬č¦ę±‚ connector-rust >= v0.8.8, TDengine >= 3.0.5.0 - -```rust -consumer.offset_seek(topic, vgroup_id, offset).await; -``` - -åœę­¢č®¢é˜…ļ¼š - -```rust -consumer.unsubscribe().await; -``` - -åÆ¹äŗŽ TMQ DSN, ęœ‰ä»„äø‹é…ē½®é”¹åÆä»„čæ›č”Œč®¾ē½®ļ¼Œéœ€č¦ę³Øę„ēš„ę˜Æļ¼Œ`group.id` ę˜Æåæ…é”»ēš„ć€‚ - -- `group.id`: åŒäø€äøŖę¶ˆč“¹č€…ē»„ļ¼Œå°†ä»„č‡³å°‘ę¶ˆč“¹äø€ę¬”ēš„ę–¹å¼čæ›č”Œę¶ˆęÆč“Ÿč½½å‡č””ć€‚ -- `client.id`: åÆé€‰ēš„č®¢é˜…å®¢ęˆ·ē«ÆčÆ†åˆ«é”¹ć€‚ -- `auto.offset.reset`: åÆé€‰åˆå§‹åŒ–č®¢é˜…čµ·ē‚¹ļ¼Œ *earliest* äøŗä»Žå¤“å¼€å§‹č®¢é˜…ļ¼Œ *latest* äøŗä»…ä»Žęœ€ę–°ę•°ę®å¼€å§‹č®¢é˜…ļ¼Œé»˜č®¤äøŗä»Žå¤“č®¢é˜…ć€‚ę³Øę„ļ¼Œę­¤é€‰é”¹åœØåŒäø€äøŖ `group.id` äø­ä»…ē”Ÿę•ˆäø€ę¬”ć€‚ -- `enable.auto.commit`: 当设置为 `true` ę—¶ļ¼Œå°†åÆē”Øč‡ŖåŠØę ‡č®°ęØ”å¼ļ¼Œå½“åÆ¹ę•°ę®äø€č‡“ę€§äøę•ę„Ÿę—¶ļ¼ŒåÆä»„åÆē”Øę­¤ę–¹å¼ć€‚ -- `auto.commit.interval.ms`: č‡ŖåŠØę ‡č®°ēš„ę—¶é—“é—“éš”ć€‚ - -å®Œę•“č®¢é˜…ē¤ŗä¾‹å‚č§ [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs). 
å…¶ä»–ē›øå…³ē»“ęž„ä½“ API ä½æē”ØčÆ“ę˜ŽčÆ·ē§»ę­„ Rust ę–‡ę”£ę‰˜ē®”ē½‘é”µļ¼šć€‚ diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 8752dc2145..0b9f2d75a7 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -21,10 +21,25 @@ Python čæžęŽ„å™Øēš„ęŗē ę‰˜ē®”åœØ [GitHub](https://github.com/taosdata/taos-con - åŽŸē”ŸčæžęŽ„[ę”ÆęŒēš„å¹³å°](../#ę”ÆęŒēš„å¹³å°)和 TDengine å®¢ęˆ·ē«Æę”ÆęŒēš„å¹³å°äø€č‡“ć€‚ - REST čæžęŽ„ę”ÆęŒę‰€ęœ‰čƒ½čæč”Œ Python ēš„å¹³å°ć€‚ -## ē‰ˆęœ¬é€‰ę‹© +### ę”ÆęŒēš„åŠŸčƒ½ + +- åŽŸē”ŸčæžęŽ„ę”ÆęŒ TDengine ēš„ę‰€ęœ‰ę øåæƒåŠŸčƒ½ļ¼Œ åŒ…ę‹¬ļ¼š čæžęŽ„ē®”ē†ć€ę‰§č”Œ SQLć€å‚ę•°ē»‘å®šć€č®¢é˜…ć€ę— ęØ”å¼å†™å…„ļ¼ˆschemaless)。 +- REST čæžęŽ„ę”ÆęŒēš„åŠŸčƒ½åŒ…ę‹¬ļ¼ščæžęŽ„ē®”ē†ć€ę‰§č”Œ SQL怂 (é€ščæ‡ę‰§č”Œ SQL åÆä»„ļ¼š ē®”ē†ę•°ę®åŗ“ć€ē®”ē†č”Øå’Œč¶…ēŗ§č”Øć€å†™å…„ę•°ę®ć€ęŸ„čÆ¢ę•°ę®ć€åˆ›å»ŗčæžē»­ęŸ„čÆ¢ē­‰)怂 + +## åŽ†å²ē‰ˆęœ¬ ę— č®ŗä½æē”Øä»€ä¹ˆē‰ˆęœ¬ēš„ TDengine éƒ½å»ŗč®®ä½æē”Øęœ€ę–°ē‰ˆęœ¬ēš„ `taospy`怂 +|Python Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:-------------------:|:----:| +|2.7.9|ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦| +|2.7.8|ę–°å¢ž `execute_many`| + +|Python Websocket Connector ē‰ˆęœ¬|äø»č¦å˜åŒ–| +|:----------------------------:|:-----:| +|0.2.5|1. ę•°ę®č®¢é˜…ę”ÆęŒčŽ·å–ę¶ˆč“¹čæ›åŗ¦å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦
2. ę”ÆęŒ schemaless
3. ę”ÆęŒ STMT| +|0.2.4|ę•°ę®č®¢é˜…ę–°å¢žå–ę¶ˆč®¢é˜…ę–¹ę³•| + ## 处理异常 Python čæžęŽ„å™ØåÆčƒ½ä¼šäŗ§ē”Ÿ 4 ē§å¼‚åøøļ¼š @@ -55,12 +70,25 @@ Python Connector ēš„ę‰€ęœ‰ę•°ę®åŗ“ę“ä½œå¦‚ęžœå‡ŗēŽ°å¼‚åøøļ¼Œéƒ½ä¼šē›“ęŽ„ęŠ›å‡ŗ {{#include docs/examples/python/handle_exception.py}} ``` -## ę”ÆęŒēš„åŠŸčƒ½ +TDengine DataType 和 Python DataType -- åŽŸē”ŸčæžęŽ„ę”ÆęŒ TDengine ēš„ę‰€ęœ‰ę øåæƒåŠŸčƒ½ļ¼Œ åŒ…ę‹¬ļ¼š čæžęŽ„ē®”ē†ć€ę‰§č”Œ SQLć€å‚ę•°ē»‘å®šć€č®¢é˜…ć€ę— ęØ”å¼å†™å…„ļ¼ˆschemaless)。 -- REST čæžęŽ„ę”ÆęŒēš„åŠŸčƒ½åŒ…ę‹¬ļ¼ščæžęŽ„ē®”ē†ć€ę‰§č”Œ SQL怂 (é€ščæ‡ę‰§č”Œ SQL åÆä»„ļ¼š ē®”ē†ę•°ę®åŗ“ć€ē®”ē†č”Øå’Œč¶…ēŗ§č”Øć€å†™å…„ę•°ę®ć€ęŸ„čÆ¢ę•°ę®ć€åˆ›å»ŗčæžē»­ęŸ„čÆ¢ē­‰)怂 +TDengine ē›®å‰ę”ÆęŒę—¶é—“ęˆ³ć€ę•°å­—ć€å­—ē¬¦ć€åøƒå°”ē±»åž‹ļ¼ŒäøŽ Python åÆ¹åŗ”ē±»åž‹č½¬ę¢å¦‚äø‹ļ¼š -## 安装 +|TDengine DataType|Python DataType| +|:---------------:|:-------------:| +|TIMESTAMP|datetime| +|INT|int| +|BIGINT|int| +|FLOAT|float| +|DOUBLE|int| +|SMALLINT|int| +|TINYINT|int| +|BOOL|bool| +|BINARY|str| +|NCHAR|str| +|JSON|str| + +## 安装歄骤 ### å®‰č£…å‰å‡†å¤‡ @@ -373,7 +401,7 @@ TaosCursor ē±»ä½æē”ØåŽŸē”ŸčæžęŽ„čæ›č”Œå†™å…„ć€ęŸ„čÆ¢ę“ä½œć€‚åœØå®¢ęˆ·ē«Æå¤šēŗæ
-#### Connection ē±»ēš„ä½æē”Ø +##### Connection ē±»ēš„ä½æē”Ø `Connection` ē±»ę—¢åŒ…å«åÆ¹ PEP249 Connection ęŽ„å£ēš„å®žēŽ°(å¦‚ļ¼šcursorę–¹ę³•å’Œ close 方法)ļ¼Œä¹ŸåŒ…å«å¾ˆå¤šę‰©å±•åŠŸčƒ½ļ¼ˆå¦‚ļ¼š execute态 query态schemaless_insert 和 subscribe 方法。 @@ -537,7 +565,7 @@ RestClient ē±»ę˜ÆåÆ¹äŗŽ REST API ēš„ē›“ęŽ„å°č£…ć€‚å®ƒåŖåŒ…å«äø€äøŖ sql() ę–¹ `Consumer` ęä¾›äŗ† Python čæžęŽ„å™Øč®¢é˜… TMQ ę•°ę®ēš„ API怂 -#### åˆ›å»ŗ Consumer +##### åˆ›å»ŗ Consumer åˆ›å»ŗ Consumer 语法为 `consumer = Consumer(configs)`ļ¼Œå‚ę•°å®šä¹‰čÆ·å‚č€ƒ [ę•°ę®č®¢é˜…ę–‡ę”£](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)怂 @@ -547,15 +575,15 @@ from taos.tmq import Consumer consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"}) ``` -#### č®¢é˜… topics +##### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) ``` -#### ę¶ˆč“¹ę•°ę® +##### ę¶ˆč“¹ę•°ę® Consumer API ēš„ `poll` ę–¹ę³•ē”ØäŗŽę¶ˆč“¹ę•°ę®ļ¼Œ`poll` ę–¹ę³•ęŽ„ę”¶äø€äøŖ float ē±»åž‹ēš„č¶…ę—¶ę—¶é—“ļ¼Œč¶…ę—¶ę—¶é—“å•ä½äøŗē§’ļ¼ˆsļ¼‰ļ¼Œ`poll` ę–¹ę³•åœØč¶…ę—¶ä¹‹å‰čæ”å›žäø€ę” Message ē±»åž‹ēš„ę•°ę®ęˆ–č¶…ę—¶čæ”å›ž `None`ć€‚ę¶ˆč“¹č€…åæ…é”»é€ščæ‡ Message ēš„ `error()` ę–¹ę³•ę ”éŖŒčæ”å›žę•°ę®ēš„ error 俔息。 @@ -573,7 +601,7 @@ while True: print(block.fetchall()) ``` -#### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ +##### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œčæ”å›žē»“ęžœē±»åž‹äøŗ TopicPartition åˆ—č”Øć€‚ @@ -581,7 +609,7 @@ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic assignments = consumer.assignment() ``` -#### é‡ē½®ę¶ˆč“¹čæ›åŗ¦ +##### ęŒ‡å®šč®¢é˜… Offset Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ē½®ļ¼Œę–¹ę³•å‚ę•°ē±»åž‹äøŗ TopicPartition怂 @@ -590,7 +618,7 @@ tp = 
TopicPartition(topic='topic1', partition=0, offset=0) consumer.seek(tp) ``` -#### ē»“ęŸę¶ˆč“¹ +##### å…³é—­č®¢é˜… ę¶ˆč“¹ē»“ęŸåŽļ¼Œåŗ”å½“å–ę¶ˆč®¢é˜…ļ¼Œå¹¶å…³é—­ Consumer怂 @@ -599,13 +627,13 @@ consumer.unsubscribe() consumer.close() ``` -#### tmq č®¢é˜…ē¤ŗä¾‹ä»£ē  +##### å®Œę•“ē¤ŗä¾‹ ```python {{#include docs/examples/python/tmq_example.py}} ``` -#### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗä¾‹ä»£ē  +##### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗä¾‹ä»£ē  ```python {{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}} @@ -619,7 +647,7 @@ consumer.close() taosws `Consumer` API ęä¾›äŗ†åŸŗäŗŽ Websocket č®¢é˜… TMQ ę•°ę®ēš„ API怂 -#### åˆ›å»ŗ Consumer +##### åˆ›å»ŗ Consumer åˆ›å»ŗ Consumer 语法为 `consumer = Consumer(conf=configs)`ļ¼Œä½æē”Øę—¶éœ€č¦ęŒ‡å®š `td.connect.websocket.scheme` å‚ę•°å€¼äøŗ "ws"ļ¼Œå‚ę•°å®šä¹‰čÆ·å‚č€ƒ [ę•°ę®č®¢é˜…ę–‡ę”£](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)怂 @@ -629,15 +657,15 @@ import taosws consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"}) ``` -#### č®¢é˜… topics +##### č®¢é˜… topics -Comsumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 +Consumer API ēš„ `subscribe` ę–¹ę³•ē”ØäŗŽč®¢é˜… topics,consumer ę”ÆęŒåŒę—¶č®¢é˜…å¤šäøŖ topic怂 ```python consumer.subscribe(['topic1', 'topic2']) ``` -#### ę¶ˆč“¹ę•°ę® +##### ę¶ˆč“¹ę•°ę® Consumer API ēš„ `poll` ę–¹ę³•ē”ØäŗŽę¶ˆč“¹ę•°ę®ļ¼Œ`poll` ę–¹ę³•ęŽ„ę”¶äø€äøŖ float ē±»åž‹ēš„č¶…ę—¶ę—¶é—“ļ¼Œč¶…ę—¶ę—¶é—“å•ä½äøŗē§’ļ¼ˆsļ¼‰ļ¼Œ`poll` ę–¹ę³•åœØč¶…ę—¶ä¹‹å‰čæ”å›žäø€ę” Message ē±»åž‹ēš„ę•°ę®ęˆ–č¶…ę—¶čæ”å›ž `None`ć€‚ę¶ˆč“¹č€…åæ…é”»é€ščæ‡ Message ēš„ `error()` ę–¹ę³•ę ”éŖŒčæ”å›žę•°ę®ēš„ error 俔息。 @@ -654,7 +682,7 @@ while True: print(row) ``` -#### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ +##### čŽ·å–ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œčæ”å›žē»“ęžœē±»åž‹äøŗ TopicPartition åˆ—č”Øć€‚ @@ -662,7 +690,7 @@ Consumer API ēš„ 
`assignment` ę–¹ę³•ē”ØäŗŽčŽ·å– Consumer č®¢é˜…ēš„ę‰€ęœ‰ topic assignments = consumer.assignment() ``` -#### é‡ē½®ę¶ˆč“¹čæ›åŗ¦ +##### é‡ē½®ę¶ˆč“¹čæ›åŗ¦ Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ē½®ć€‚ @@ -670,7 +698,7 @@ Consumer API ēš„ `seek` ę–¹ę³•ē”ØäŗŽé‡ē½® Consumer ēš„ę¶ˆč“¹čæ›åŗ¦åˆ°ęŒ‡å®šä½ consumer.seek(topic='topic1', partition=0, offset=0) ``` -#### ē»“ęŸę¶ˆč“¹ +##### ē»“ęŸę¶ˆč“¹ ę¶ˆč“¹ē»“ęŸåŽļ¼Œåŗ”å½“å–ę¶ˆč®¢é˜…ļ¼Œå¹¶å…³é—­ Consumer怂 @@ -679,7 +707,7 @@ consumer.unsubscribe() consumer.close() ``` -#### tmq č®¢é˜…ē¤ŗä¾‹ä»£ē  +##### tmq č®¢é˜…ē¤ŗä¾‹ä»£ē  ```python {{#include docs/examples/python/tmq_websocket_example.py}} @@ -687,7 +715,7 @@ consumer.close() čæžęŽ„å™Øęä¾›äŗ† `assignment` ęŽ„å£ļ¼Œē”ØäŗŽčŽ·å– topic assignment ēš„åŠŸčƒ½ļ¼ŒåÆä»„ęŸ„čÆ¢č®¢é˜…ēš„ topic ēš„ę¶ˆč“¹čæ›åŗ¦ļ¼Œå¹¶ęä¾› `seek` ęŽ„å£ļ¼Œē”ØäŗŽé‡ē½® topic ēš„ę¶ˆč“¹čæ›åŗ¦ć€‚ -#### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗä¾‹ä»£ē  +##### čŽ·å–å’Œé‡ē½®ę¶ˆč“¹čæ›åŗ¦ē¤ŗä¾‹ä»£ē  ```python {{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}} @@ -703,19 +731,19 @@ consumer.close() -ē®€å•å†™å…„ +##### ē®€å•å†™å…„ ```python {{#include docs/examples/python/schemaless_insert.py}} ``` -åø¦ęœ‰ ttl å‚ę•°ēš„å†™å…„ +##### åø¦ęœ‰ ttl å‚ę•°ēš„å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_ttl.py}} ``` -åø¦ęœ‰ req_id å‚ę•°ēš„å†™å…„ +##### åø¦ęœ‰ req_id å‚ę•°ēš„å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_req_id.py}} @@ -725,19 +753,19 @@ consumer.close() -ē®€å•å†™å…„ +##### ē®€å•å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_raw.py}} ``` -åø¦ęœ‰ ttl å‚ę•°ēš„å†™å…„ +##### åø¦ęœ‰ ttl å‚ę•°ēš„å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_raw_ttl.py}} ``` -åø¦ęœ‰ req_id å‚ę•°ēš„å†™å…„ +##### åø¦ęœ‰ req_id å‚ę•°ēš„å†™å…„ ```python {{#include docs/examples/python/schemaless_insert_raw_req_id.py}} @@ -753,7 +781,7 @@ TDengine 
ēš„ Python čæžęŽ„å™Øę”ÆęŒå‚ę•°ē»‘å®šé£Žę ¼ēš„ Prepare API ę–¹å¼å†™ -#### åˆ›å»ŗ stmt +##### åˆ›å»ŗ stmt Python čæžęŽ„å™Øēš„ `Connection` ęä¾›äŗ† `statement` ę–¹ę³•ē”ØäŗŽåˆ›å»ŗå‚ę•°ē»‘å®šåÆ¹č±” stmtļ¼ŒčÆ„ę–¹ę³•ęŽ„ę”¶ sql å­—ē¬¦äø²ä½œäøŗå‚ę•°ļ¼Œsql å­—ē¬¦äø²ē›®å‰ä»…ę”ÆęŒē”Ø `?` ę„ä»£č”Øē»‘å®šēš„å‚ę•°ć€‚ @@ -764,7 +792,7 @@ conn = taos.connect() stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)") ``` -#### å‚ę•°ē»‘å®š +##### å‚ę•°ē»‘å®š č°ƒē”Ø `new_multi_binds` å‡½ę•°åˆ›å»ŗ params åˆ—č”Øļ¼Œē”ØäŗŽå‚ę•°ē»‘å®šć€‚ @@ -794,7 +822,7 @@ params[15].timestamp([None, None, 1626861392591]) stmt.bind_param_batch(params) ``` -#### ę‰§č”Œ sql +##### ę‰§č”Œ sql č°ƒē”Ø stmt ēš„ `execute` ę–¹ę³•ę‰§č”Œ sql @@ -802,7 +830,7 @@ stmt.bind_param_batch(params) stmt.execute() ``` -#### 关闭 stmt +##### 关闭 stmt ęœ€åŽéœ€č¦å…³é—­ stmt怂 @@ -810,7 +838,7 @@ stmt.execute() stmt.close() ``` -#### 示例代码 +##### 示例代码 ```python {{#include docs/examples/python/stmt_example.py}} @@ -819,7 +847,7 @@ stmt.close() -#### åˆ›å»ŗ stmt +##### åˆ›å»ŗ stmt Python WebSocket čæžęŽ„å™Øēš„ `Connection` ęä¾›äŗ† `statement` ę–¹ę³•ē”ØäŗŽåˆ›å»ŗå‚ę•°ē»‘å®šåÆ¹č±” stmtļ¼ŒčÆ„ę–¹ę³•ęŽ„ę”¶ sql å­—ē¬¦äø²ä½œäøŗå‚ę•°ļ¼Œsql å­—ē¬¦äø²ē›®å‰ä»…ę”ÆęŒē”Ø `?` ę„ä»£č”Øē»‘å®šēš„å‚ę•°ć€‚ @@ -830,7 +858,7 @@ conn = taosws.connect('taosws://localhost:6041/test') stmt = conn.statement() ``` -#### č§£ęž sql +##### č§£ęž sql č°ƒē”Ø stmt ēš„ `prepare` ę–¹ę³•ę„č§£ęž insert čÆ­å„ć€‚ @@ -838,7 +866,7 @@ stmt = conn.statement() stmt.prepare("insert into t1 values (?, ?, ?, ?)") ``` -#### å‚ę•°ē»‘å®š +##### å‚ę•°ē»‘å®š č°ƒē”Ø stmt ēš„ `bind_param` ę–¹ę³•ē»‘å®šå‚ę•°ć€‚ @@ -857,7 +885,7 @@ stmt.bind_param([ stmt.add_batch() ``` -#### ę‰§č”Œ sql +##### ę‰§č”Œ sql č°ƒē”Ø stmt ēš„ `execute` ę–¹ę³•ę‰§č”Œ sql @@ -865,7 +893,7 @@ stmt.add_batch() stmt.execute() ``` -#### 关闭 stmt +##### 关闭 stmt ęœ€åŽéœ€č¦å…³é—­ stmt怂 @@ -873,7 +901,7 @@ stmt.execute() stmt.close() ``` -#### 示例代码 +##### 示例代码 ```python 
{{#include docs/examples/python/stmt_websocket_example.py}} diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 416d41614d..fc0cfbe330 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -991,18 +991,14 @@ SAMPLE(expr, k) **åŠŸčƒ½čÆ“ę˜Ž**: čŽ·å–ę•°ę®ēš„ k äøŖé‡‡ę ·å€¼ć€‚å‚ę•° k ēš„åˆę³•č¾“å…„čŒƒå›“ę˜Æ 1≤ k ≤ 1000怂 -**čæ”å›žē»“ęžœē±»åž‹**: åŒåŽŸå§‹ę•°ę®ē±»åž‹ļ¼Œ čæ”å›žē»“ęžœäø­åø¦ęœ‰čÆ„č”Œč®°å½•ēš„ę—¶é—“ęˆ³ć€‚ +**čæ”å›žē»“ęžœē±»åž‹**: åŒåŽŸå§‹ę•°ę®ē±»åž‹ć€‚ -**é€‚ē”Øę•°ę®ē±»åž‹**: åœØč¶…ēŗ§č”ØęŸ„čÆ¢äø­ä½æē”Øę—¶ļ¼Œäøčƒ½åŗ”ē”ØåœØę ‡ē­¾ä¹‹äøŠć€‚ +**é€‚ē”Øę•°ę®ē±»åž‹**: å…ØéƒØē±»åž‹å­—ę®µć€‚ **åµŒå„—å­ęŸ„čÆ¢ę”ÆęŒ**: é€‚ē”ØäŗŽå†…å±‚ęŸ„čÆ¢å’Œå¤–å±‚ęŸ„čÆ¢ć€‚ **é€‚ē”ØäŗŽ**ļ¼šč”Øå’Œč¶…ēŗ§č”Øć€‚ -**ä½æē”ØčÆ“ę˜Ž**: - -- äøčƒ½å‚äøŽč”Øč¾¾å¼č®”ē®—ļ¼›čÆ„å‡½ę•°åÆä»„åŗ”ē”ØåœØę™®é€šč”Øå’Œč¶…ēŗ§č”ØäøŠļ¼› - ### TAIL @@ -1047,11 +1043,11 @@ TOP(expr, k) UNIQUE(expr) ``` -**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”å›žčÆ„åˆ—ēš„ę•°å€¼é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct ē›øä¼¼ļ¼Œä½†ę˜ÆåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³äæ”ęÆć€‚åÆä»„é’ˆåÆ¹é™¤ę—¶é—“åˆ—ä»„å¤–ēš„å­—ę®µčæ›č”ŒęŸ„čÆ¢ļ¼ŒåÆä»„åŒ¹é…ę ‡ē­¾å’Œę—¶é—“ęˆ³ļ¼Œå…¶äø­ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ę˜Æē¬¬äø€ę¬”å‡ŗēŽ°ę—¶åˆ»ēš„ę ‡ē­¾å’Œę—¶é—“ęˆ³ć€‚ +**åŠŸčƒ½čÆ“ę˜Ž**ļ¼ščæ”å›žčÆ„åˆ—ę•°ę®é¦–ę¬”å‡ŗēŽ°ēš„å€¼ć€‚čÆ„å‡½ę•°åŠŸčƒ½äøŽ distinct 相似。 **čæ”å›žę•°ę®ē±»åž‹**ļ¼šåŒåŗ”ē”Øēš„å­—ę®µć€‚ -**é€‚ē”Øę•°ę®ē±»åž‹**ļ¼šé€‚åˆäŗŽé™¤ę—¶é—“ē±»åž‹ä»„å¤–ēš„å­—ę®µć€‚ +**é€‚ē”Øę•°ę®ē±»åž‹**ļ¼šå…ØéƒØē±»åž‹å­—ę®µć€‚ **é€‚ē”ØäŗŽ**: č”Øå’Œč¶…ēŗ§č”Øć€‚ diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md index 12ad665e42..f3397ae82d 100644 --- a/docs/zh/12-taos-sql/24-show.md +++ b/docs/zh/12-taos-sql/24-show.md @@ -36,7 +36,7 @@ SHOW CONNECTIONS; SHOW CONSUMERS; ``` -ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę“»č·ƒēš„ę¶ˆč“¹č€…ēš„äæ”ęÆć€‚ +ę˜¾ē¤ŗå½“å‰ę•°ę®åŗ“äø‹ę‰€ęœ‰ę¶ˆč“¹č€…ēš„äæ”ęÆć€‚ ## SHOW CREATE DATABASE diff --git a/docs/zh/28-releases/01-tdengine.md 
b/docs/zh/28-releases/01-tdengine.md index ae47388566..557552bc1c 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x å„ē‰ˆęœ¬å®‰č£…åŒ…čÆ·č®æé—®[čæ™é‡Œ](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.0.6.0 + + + ## 3.0.5.1 diff --git a/include/common/tglobal.h b/include/common/tglobal.h index d53f78b41e..bc4037c642 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -164,6 +164,8 @@ extern char tsSmlTagName[]; // extern bool tsSmlDataFormat; // extern int32_t tsSmlBatchSize; +extern int32_t tmqMaxTopicNum; + // wal extern int64_t tsWalFsyncDataSizeLimit; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index e98453f571..8ebf07bfcc 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -145,7 +145,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_TOPIC, "drop-topic", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_SUBSCRIBE, "subscribe", SCMSubscribeReq, SCMSubscribeRsp) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp) - TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) +// TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL) diff --git a/include/libs/function/tudf.h b/include/libs/function/tudf.h index b71d50d43c..6b15833917 100644 --- a/include/libs/function/tudf.h +++ b/include/libs/function/tudf.h @@ -111,6 +111,12 @@ int32_t udfStartUdfd(int32_t startDnodeId); */ int32_t udfStopUdfd(); +/** + * get udfd pid + * + */ + int32_t udfGetUdfdPid(int32_t* pUdfdPid); + #ifdef __cplusplus } #endif diff --git a/include/util/taoserror.h b/include/util/taoserror.h index ce24761df9..772a668f0f 100644 
--- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -66,8 +66,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) // #define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) // #define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected" -#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // -#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // +#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) // +#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) // @@ -277,7 +277,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_FUNC_COMMENT TAOS_DEF_ERROR_CODE(0, 0x0378) #define TSDB_CODE_MND_INVALID_FUNC_RETRIEVE TAOS_DEF_ERROR_CODE(0, 0x0379) - + // mnode-db #define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) @@ -288,9 +288,9 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385) #define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) // // #define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) // 2.x -#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // +#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) // #define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) // internal -#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // +#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) // #define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B) #define TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO TAOS_DEF_ERROR_CODE(0, 0x038C) // #define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) // 2.x @@ -516,6 +516,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x072E) #define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F) #define 
TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730) +#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731) // grant #define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800) @@ -768,6 +769,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001) #define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002) #define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003) +#define TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4004) +#define TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x4005) // stream #define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100) @@ -778,7 +781,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101) // UTIL -#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) +#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000) #ifdef __cplusplus } diff --git a/include/util/tcompare.h b/include/util/tcompare.h index f92e1c3970..2fa736f4df 100644 --- a/include/util/tcompare.h +++ b/include/util/tcompare.h @@ -79,6 +79,7 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight); int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight); int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight); +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight); int32_t comparestrRegexMatch(const void *pLeft, const void *pRight); int32_t comparestrRegexNMatch(const void *pLeft, const void *pRight); diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh index 081383f89b..0622b01f2b 100755 --- a/packaging/testpackage.sh +++ b/packaging/testpackage.sh @@ -152,7 +152,7 @@ function wgetFile { file=$1 versionPath=$2 sourceP=$3 -nasServerIP="192.168.1.131" +nasServerIP="192.168.1.213" packagePath="/nas/TDengine/v${versionPath}/${verMode}" if [ -f ${file} ];then echoColor YD "${file} already exists ,it will delete it 
and download it again " diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 503120fe85..13dc019feb 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -749,6 +749,9 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns, pReq.suid = pTableMeta->uid; pReq.source = TD_REQ_FROM_TAOX; pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? "sml_add_column" : "sml_modify_column_size"; + } else{ + uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action); + goto end; } code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0); diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c index 975b304bf4..8ac9550aca 100644 --- a/source/client/src/clientStmt.c +++ b/source/client/src/clientStmt.c @@ -939,8 +939,6 @@ int stmtClose(TAOS_STMT* stmt) { stmtCleanSQLInfo(pStmt); taosMemoryFree(stmt); - STMT_DLOG_E("stmt freed"); - return TSDB_CODE_SUCCESS; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index 8758cec2ec..83550aa15d 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -652,7 +652,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm int32_t j = 0; int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs); for (j = 0; j < numOfVgroups; j++) { - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->vgId == vgId) { break; } @@ -666,7 +666,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm return; } - SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j); + SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j); if (pVg->offsetInfo.currentOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.currentOffset, &pVg->offsetInfo.committedOffset)) { code = doSendCommitMsg(tmq, pVg, pTopic->topicName, pParamSet, j, numOfVgroups, type); @@ -742,13 +742,15 @@ static void 
asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us static void generateTimedTask(int64_t refId, int32_t type) { tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = type; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - taosReleaseRef(tmqMgmt.rsetId, refId); - } + if(tmq == NULL) return; + + int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); + if(pTaskType == NULL) return; + + *pTaskType = type; + taosWriteQitem(tmq->delayedTask, pTaskType); + tsem_post(&tmq->rspSem); + taosReleaseRef(tmqMgmt.rsetId, refId); } void tmqAssignAskEpTask(void* param, void* tmrId) { @@ -763,19 +765,19 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) { taosMemoryFree(param); } -void tmqAssignDelayedReportTask(void* param, void* tmrId) { - int64_t refId = *(int64_t*)param; - tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); - if (tmq != NULL) { - int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); - *pTaskType = TMQ_DELAYED_TASK__REPORT; - taosWriteQitem(tmq->delayedTask, pTaskType); - tsem_post(&tmq->rspSem); - } - - taosReleaseRef(tmqMgmt.rsetId, refId); - taosMemoryFree(param); -} +//void tmqAssignDelayedReportTask(void* param, void* tmrId) { +// int64_t refId = *(int64_t*)param; +// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId); +// if (tmq != NULL) { +// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0); +// *pTaskType = TMQ_DELAYED_TASK__REPORT; +// taosWriteQitem(tmq->delayedTask, pTaskType); +// tsem_post(&tmq->rspSem); +// } +// +// taosReleaseRef(tmqMgmt.rsetId, refId); +// taosMemoryFree(param); +//} int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { if (pMsg) { @@ -813,7 +815,7 @@ void tmqSendHbReq(void* param, void* tmrId) { offRows->offset = pVg->offsetInfo.currentOffset; char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, 
&offRows->offset); - tscInfo("report offset: vgId:%d, offset:%s, rows:%"PRId64, offRows->vgId, buf, offRows->rows); + tscInfo("consumer:0x%" PRIx64 ",report offset: vgId:%d, offset:%s, rows:%"PRId64, tmq->consumerId, offRows->vgId, buf, offRows->rows); } } // tmq->needReportOffsetRows = false; @@ -1489,7 +1491,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic makeTopicVgroupKey(vgKey, pTopic->topicName, pVgEp->vgId); SVgroupSaveInfo* pInfo = taosHashGet(pVgOffsetHashMap, vgKey, strlen(vgKey)); - STqOffsetVal offsetNew = {.type = tmq->resetOffsetCfg}; + STqOffsetVal offsetNew = {0}; + offsetNew.type = tmq->resetOffsetCfg; SMqClientVg clientVg = { .pollCnt = 0, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 12ec97080f..5d1854ee2c 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -162,7 +162,7 @@ static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "task_id", .bytes = 32, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "node_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, + {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, {.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, }; @@ -290,7 +290,7 @@ static const SSysDbTableSchema subscriptionSchema[] = { {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "vgroup_id", .bytes = 4, .type = 
TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, }; @@ -352,7 +352,7 @@ static const SSysDbTableSchema connectionsSchema[] = { static const SSysDbTableSchema consumerSchema[] = { - {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, + {.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false}, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 5f6ec92d50..74471eca9a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -105,11 +105,13 @@ char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table // bool tsSmlDataFormat = false; // int32_t tsSmlBatchSize = 10000; +// tmq +int32_t tmqMaxTopicNum = 20; // query int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT -bool tsEnableQueryHb = false; +bool tsEnableQueryHb = true; bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; @@ -511,6 +513,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if 
(cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "tmqMaxTopicNum", tmqMaxTopicNum, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1; @@ -882,6 +886,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4e8797b1ec..debb93e8ba 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -6982,8 +6982,11 @@ int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int6 if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } tEndDecode(pDecoder); return 0; @@ -7541,8 +7544,11 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) { int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) { if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1; - *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; - if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + pReq->ctimeMs = 
0; + if (!tDecodeIsEnd(pDecoder)) { + *(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs; + if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1; + } return 0; } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index d8c43747f7..7a5581efbe 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -969,7 +969,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision) default: fractionLen = 0; - ASSERT(false); + return; } if (taosLocalTime(&quot, &ptm, buf) == NULL) { diff --git a/source/dnode/mnode/impl/inc/mndConsumer.h b/source/dnode/mnode/impl/inc/mndConsumer.h index 96401511d2..a3a31cfc5a 100644 --- a/source/dnode/mnode/impl/inc/mndConsumer.h +++ b/source/dnode/mnode/impl/inc/mndConsumer.h @@ -25,14 +25,15 @@ extern "C" { enum { MQ_CONSUMER_STATUS_REBALANCE = 1, // MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__READY, - MQ_CONSUMER_STATUS__LOST, + MQ_CONSUMER_STATUS_READY, + MQ_CONSUMER_STATUS_LOST, // MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore - MQ_CONSUMER_STATUS__LOST_REBD, -}; +// MQ_CONSUMER_STATUS__LOST_REBD, +}; int32_t mndInitConsumer(SMnode *pMnode); void mndCleanupConsumer(SMnode *pMnode); +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId); SMqConsumerObj *mndAcquireConsumer(SMnode *pMnode, int64_t consumerId); void mndReleaseConsumer(SMnode *pMnode, SMqConsumerObj *pConsumer); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index 03ad1ed581..1f4bc19e33 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -137,12 +137,12 @@ typedef enum { } EDndReason; typedef enum { - CONSUMER_UPDATE__TOUCH = 1, // rebalance req do not need change consume topic - CONSUMER_UPDATE__ADD, - CONSUMER_UPDATE__REMOVE, - CONSUMER_UPDATE__LOST, - CONSUMER_UPDATE__RECOVER, - CONSUMER_UPDATE__REBALANCE, // subscribe req need change consume topic + 
CONSUMER_UPDATE_REB_MODIFY_NOTOPIC = 1, // topic do not need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_TOPIC, // topic need modified after rebalance + CONSUMER_UPDATE_REB_MODIFY_REMOVE, // topic need removed after rebalance +// CONSUMER_UPDATE_TIMER_LOST, + CONSUMER_UPDATE_RECOVER, + CONSUMER_UPDATE_SUB_MODIFY, // modify after subscribe req } ECsmUpdateType; typedef struct { @@ -549,7 +549,7 @@ typedef struct { // data for display int32_t pid; SEpSet ep; - int64_t upTime; + int64_t createTime; int64_t subscribeTime; int64_t rebalanceTime; @@ -560,7 +560,7 @@ typedef struct { } SMqConsumerObj; SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]); -void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer); +void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer, bool delete); int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer); void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver); diff --git a/source/dnode/mnode/impl/inc/mndSubscribe.h b/source/dnode/mnode/impl/inc/mndSubscribe.h index fad316ea12..ba4328b8fe 100644 --- a/source/dnode/mnode/impl/inc/mndSubscribe.h +++ b/source/dnode/mnode/impl/inc/mndSubscribe.h @@ -25,6 +25,7 @@ extern "C" { int32_t mndInitSubscribe(SMnode *pMnode); void mndCleanupSubscribe(SMnode *pMnode); +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName); SMqSubscribeObj *mndAcquireSubscribe(SMnode *pMnode, const char *CGroup, const char *topicName); SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key); void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub); diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 4dded61ce3..47cc4a1ce7 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -26,6 +26,7 @@ #define MND_CONSUMER_VER_NUMBER 2 #define MND_CONSUMER_RESERVE_SIZE 64 +#define MND_MAX_GROUP_PER_TOPIC 
100 #define MND_CONSUMER_LOST_HB_CNT 6 #define MND_CONSUMER_LOST_CLEAR_THRESHOLD 43200 @@ -63,7 +64,7 @@ int32_t mndInitConsumer(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg); - mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); +// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_LOST, mndProcessConsumerLostMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg); mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg); @@ -75,6 +76,22 @@ int32_t mndInitConsumer(SMnode *pMnode) { void mndCleanupConsumer(SMnode *pMnode) {} +void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId){ + SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); + if (pClearMsg == NULL) { + mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. 
alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); + return; + } + + pClearMsg->consumerId = consumerId; + SRpcMsg rpcMsg = { + .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; + + mInfo("consumer:0x%" PRIx64 " drop from sdb", consumerId); + tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + return; +} + bool mndRebTryStart() { int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1); mDebug("tq timer, rebalance counter old val:%d", old); @@ -105,50 +122,48 @@ void mndRebCntDec() { } } -static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMqConsumerLostMsg *pLostMsg = pMsg->pCont; - SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); - if (pConsumer == NULL) { - return 0; - } - - mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, - mndConsumerStatusName(pConsumer->status)); - - if (pConsumer->status != MQ_CONSUMER_STATUS__READY) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } - - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; - - mndReleaseConsumer(pMnode, pConsumer); - - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); - if (pTrans == NULL) { - goto FAIL; - } - - if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - goto FAIL; - } - - if (mndTransPrepare(pMnode, pTrans) != 0) { - goto FAIL; - } - - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return 0; -FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - mndTransDrop(pTrans); - return -1; -} +//static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) { +// SMnode *pMnode = pMsg->info.node; +// SMqConsumerLostMsg *pLostMsg = pMsg->pCont; +// 
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pLostMsg->consumerId); +// if (pConsumer == NULL) { +// return 0; +// } +// +// mInfo("process consumer lost msg, consumer:0x%" PRIx64 " status:%d(%s)", pLostMsg->consumerId, pConsumer->status, +// mndConsumerStatusName(pConsumer->status)); +// +// if (pConsumer->status != MQ_CONSUMER_STATUS_READY) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } +// +// SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; +// +// mndReleaseConsumer(pMnode, pConsumer); +// +// STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm"); +// if (pTrans == NULL) { +// goto FAIL; +// } +// +// if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { +// goto FAIL; +// } +// +// if (mndTransPrepare(pMnode, pTrans) != 0) { +// goto FAIL; +// } +// +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return 0; +//FAIL: +// tDeleteSMqConsumerObj(pConsumerNew, true); +// mndTransDrop(pTrans); +// return -1; +//} static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { SMnode *pMnode = pMsg->info.node; @@ -162,14 +177,14 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { mInfo("receive consumer recover msg, consumer:0x%" PRIx64 " status:%d(%s)", pRecoverMsg->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { + if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { mndReleaseConsumer(pMnode, pConsumer); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; return -1; } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__RECOVER; + pConsumerNew->updateType = CONSUMER_UPDATE_RECOVER; mndReleaseConsumer(pMnode, pConsumer); @@ -181,13 +196,13 @@ static int32_t 
mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) { if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -206,13 +221,13 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { mInfo("consumer:0x%" PRIx64 " needs to be cleared, status %s", pClearMsg->consumerId, mndConsumerStatusName(pConsumer->status)); - if (pConsumer->status != MQ_CONSUMER_STATUS__LOST_REBD) { - mndReleaseConsumer(pMnode, pConsumer); - return -1; - } +// if (pConsumer->status != MQ_CONSUMER_STATUS_LOST) { +// mndReleaseConsumer(pMnode, pConsumer); +// return -1; +// } SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumer->consumerId, pConsumer->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__LOST; +// pConsumerNew->updateType = CONSUMER_UPDATE_TIMER_LOST; mndReleaseConsumer(pMnode, pConsumer); @@ -223,14 +238,14 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) { if (mndSetConsumerDropLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL; if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL; - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return 0; FAIL: - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); + mndTransDrop(pTrans); return -1; } @@ -297,56 +312,29 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) { int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1); int32_t status = atomic_load_32(&pConsumer->status); - mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", uptime:%" PRId64 ", hbstatus:%d", 
- pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->upTime, + mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d", + pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime, hbStatus); - if (status == MQ_CONSUMER_STATUS__READY) { - if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { - SMqConsumerLostMsg *pLostMsg = rpcMallocCont(sizeof(SMqConsumerLostMsg)); - if (pLostMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to transfer consumer status to lost due to out of memory. alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerLostMsg)); - continue; + if (status == MQ_CONSUMER_STATUS_READY) { + if (taosArrayGetSize(pConsumer->assignedTopics) == 0) { // unsubscribe or close + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } else if (hbStatus > MND_CONSUMER_LOST_HB_CNT) { + taosRLockLatch(&pConsumer->lock); + int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); + for (int32_t i = 0; i < topicNum; i++) { + char key[TSDB_SUBSCRIBE_KEY_LEN]; + char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); + mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); + SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); + taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); } - - pLostMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_CONSUMER_LOST, .pCont = pLostMsg, .contLen = sizeof(SMqConsumerLostMsg)}; - - mDebug("consumer:0x%"PRIx64" hb not received beyond threshold %d, set to lost", pConsumer->consumerId, - MND_CONSUMER_LOST_HB_CNT); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + taosRUnLockLatch(&pConsumer->lock); } - } else if (status == MQ_CONSUMER_STATUS__LOST_REBD) { - // if the client is lost longer than one day, clear it. Otherwise, do nothing about the lost consumers. 
- if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { - SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg)); - if (pClearMsg == NULL) { - mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", - pConsumer->consumerId, (int32_t)sizeof(SMqConsumerClearMsg)); - continue; - } - - pClearMsg->consumerId = pConsumer->consumerId; - SRpcMsg rpcMsg = { - .msgType = TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, .pCont = pClearMsg, .contLen = sizeof(SMqConsumerClearMsg)}; - - mDebug("consumer:0x%" PRIx64 " lost beyond threshold %d, clear it", pConsumer->consumerId, - MND_CONSUMER_LOST_CLEAR_THRESHOLD); - tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); + } else if (status == MQ_CONSUMER_STATUS_LOST) { + if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); } - } else if (status == MQ_CONSUMER_STATUS__LOST) { - taosRLockLatch(&pConsumer->lock); - int32_t topicNum = taosArrayGetSize(pConsumer->currentTopics); - for (int32_t i = 0; i < topicNum; i++) { - char key[TSDB_SUBSCRIBE_KEY_LEN]; - char *removedTopic = taosArrayGetP(pConsumer->currentTopics, i); - mndMakeSubscribeKey(key, pConsumer->cgroup, removedTopic); - SMqRebInfo *pRebSub = mndGetOrCreateRebSub(pRebMsg->rebSubHash, key); - taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId); - } - taosRUnLockLatch(&pConsumer->lock); } else { // MQ_CONSUMER_STATUS_REBALANCE taosRLockLatch(&pConsumer->lock); @@ -413,7 +401,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64 "", consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -475,7 +463,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { mError("consumer:0x%" PRIx64 " group:%s not consistent 
with data in sdb, saved cgroup:%s", consumerId, req.cgroup, pConsumer->cgroup); terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST; - return -1; + goto FAIL; } atomic_store_32(&pConsumer->hbStatus, 0); @@ -483,7 +471,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { // 1. check consumer status int32_t status = atomic_load_32(&pConsumer->status); - if (status == MQ_CONSUMER_STATUS__LOST_REBD) { + if (status == MQ_CONSUMER_STATUS_LOST) { mInfo("try to recover consumer:0x%" PRIx64, consumerId); SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg)); @@ -497,10 +485,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg); } - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { mInfo("consumer:0x%" PRIx64 " not ready, status: %s", consumerId, mndConsumerStatusName(status)); terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; - return -1; + goto FAIL; } int32_t serverEpoch = atomic_load_32(&pConsumer->epoch); @@ -582,7 +570,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) { void *buf = rpcMallocCont(tlen); if (buf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + goto FAIL; } SMqRspHead* pHead = buf; @@ -669,6 +657,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { char *cgroup = subscribe.cgroup; SMqConsumerObj *pExistedConsumer = NULL; SMqConsumerObj *pConsumerNew = NULL; + STrans *pTrans = NULL; int32_t code = -1; SArray *pTopicList = subscribe.topicNames; @@ -676,9 +665,17 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem); int32_t newTopicNum = taosArrayGetSize(pTopicList); + for(int i = 0; i < newTopicNum; i++){ + int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char*)taosArrayGetP(pTopicList, i)); + if(gNum >= MND_MAX_GROUP_PER_TOPIC){ + terrno = TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE; + code = terrno; + goto _over; + } + } // check topic existence - STrans *pTrans = 
mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); + pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe"); if (pTrans == NULL) { goto _over; } @@ -701,8 +698,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval; pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg; - // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; +// pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; // use insert logic taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); @@ -721,7 +717,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { " cgroup:%s, current status:%d(%s), subscribe topic num: %d", consumerId, subscribe.cgroup, status, mndConsumerStatusName(status), newTopicNum); - if (status != MQ_CONSUMER_STATUS__READY) { + if (status != MQ_CONSUMER_STATUS_READY) { terrno = TSDB_CODE_MND_CONSUMER_NOT_READY; goto _over; } @@ -732,11 +728,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) { } // set the update type - pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE; + pConsumerNew->updateType = CONSUMER_UPDATE_SUB_MODIFY; taosArrayDestroy(pConsumerNew->assignedTopics); pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup); - int32_t oldTopicNum = (pExistedConsumer->currentTopics) ? 
taosArrayGetSize(pExistedConsumer->currentTopics) : 0; + int32_t oldTopicNum = taosArrayGetSize(pExistedConsumer->currentTopics); int32_t i = 0, j = 0; while (i < oldTopicNum || j < newTopicNum) { @@ -791,10 +787,7 @@ _over: mndReleaseConsumer(pMnode, pExistedConsumer); } - if (pConsumerNew) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); - } + tDeleteSMqConsumerObj(pConsumerNew, true); // TODO: replace with destroy subscribe msg taosArrayDestroyP(subscribe.topicNames, (FDelete)taosMemoryFree); @@ -894,17 +887,17 @@ CM_DECODE_OVER: } static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " cgroup:%s status:%d(%s) epoch:%d load from sdb, perform insert action", + mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch); - pConsumer->subscribeTime = pConsumer->upTime; + pConsumer->subscribeTime = taosGetTimestampMs(); return 0; } static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) { - mDebug("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, + mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status, mndConsumerStatusName(pConsumer->status)); - tDeleteSMqConsumerObj(pConsumer); + tDeleteSMqConsumerObj(pConsumer, false); return 0; } @@ -913,10 +906,9 @@ static void updateConsumerStatus(SMqConsumerObj *pConsumer) { if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) { if (status == MQ_CONSUMER_STATUS_REBALANCE) { - pConsumer->status = MQ_CONSUMER_STATUS__READY; - } else if (status == MQ_CONSUMER_STATUS__LOST) { - ASSERT(taosArrayGetSize(pConsumer->currentTopics) == 0); - pConsumer->status = MQ_CONSUMER_STATUS__LOST_REBD; + pConsumer->status = MQ_CONSUMER_STATUS_READY; 
+ } else if (status == MQ_CONSUMER_STATUS_READY) { + pConsumer->status = MQ_CONSUMER_STATUS_LOST; } } } @@ -930,7 +922,7 @@ static void removeFromNewTopicList(SMqConsumerObj *pConsumer, const char *pTopic taosArrayRemove(pConsumer->rebNewTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, + mInfo("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics)); break; } @@ -946,7 +938,7 @@ static void removeFromRemoveTopicList(SMqConsumerObj *pConsumer, const char *pTo taosArrayRemove(pConsumer->rebRemovedTopics, i); taosMemoryFree(p); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics)); break; } @@ -961,7 +953,7 @@ static void removeFromCurrentTopicList(SMqConsumerObj *pConsumer, const char *pT taosArrayRemove(pConsumer->currentTopics, i); taosMemoryFree(topic); - mDebug("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", + mInfo("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d", pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics)); break; } @@ -984,47 +976,46 @@ static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char* } static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) { - mDebug("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", uptime:%" PRId64, - pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->upTime); + mInfo("consumer:0x%" PRIx64 " perform update action, update 
type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64, + pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime); taosWLockLatch(&pOldConsumer->lock); - if (pNewConsumer->updateType == CONSUMER_UPDATE__REBALANCE) { + if (pNewConsumer->updateType == CONSUMER_UPDATE_SUB_MODIFY) { TSWAP(pOldConsumer->rebNewTopics, pNewConsumer->rebNewTopics); TSWAP(pOldConsumer->rebRemovedTopics, pNewConsumer->rebRemovedTopics); TSWAP(pOldConsumer->assignedTopics, pNewConsumer->assignedTopics); - pOldConsumer->subscribeTime = pNewConsumer->upTime; + pOldConsumer->subscribeTime = taosGetTimestampMs(); pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__LOST) { - int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); - for (int32_t i = 0; i < sz; i++) { - char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); - taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); - } - - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - int32_t prevStatus = pOldConsumer->status; - pOldConsumer->status = MQ_CONSUMER_STATUS__LOST; - mDebug("consumer:0x%" PRIx64 " state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", - pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), - pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__RECOVER) { + mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer",pOldConsumer->consumerId); +// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) { +// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics); +// for (int32_t i = 0; i < sz; i++) { +// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i)); +// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic); +// } +// +// int32_t prevStatus = pOldConsumer->status; +// pOldConsumer->status 
= MQ_CONSUMER_STATUS_LOST; +// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d", +// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status), +// pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_RECOVER) { int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { char *topic = taosStrdup(taosArrayGetP(pOldConsumer->assignedTopics, i)); taosArrayPush(pOldConsumer->rebNewTopics, &topic); } - pOldConsumer->rebalanceTime = pNewConsumer->upTime; pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE; - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__TOUCH) { + mInfo("consumer:0x%" PRIx64 " timer update, timer recover",pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_NOTOPIC) { atomic_add_fetch_32(&pOldConsumer->epoch, 1); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; - - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__ADD) { + pOldConsumer->rebalanceTime = taosGetTimestampMs(); + mInfo("consumer:0x%" PRIx64 " reb update, only rebalance time", pOldConsumer->consumerId); + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_TOPIC) { char *pNewTopic = taosStrdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0)); // check if exist in current topic @@ -1033,6 +1024,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, // add to current topic bool existing = existInCurrentTopicList(pOldConsumer, pNewTopic); if (existing) { + mError("consumer:0x%" PRIx64 "new topic:%s should not in currentTopics", pOldConsumer->consumerId, pNewTopic); taosMemoryFree(pNewTopic); } else { // added into current topic list taosArrayPush(pOldConsumer->currentTopics, &pNewTopic); @@ -1044,17 +1036,17 @@ static int32_t mndConsumerActionUpdate(SSdb 
*pSdb, SMqConsumerObj *pOldConsumer, updateConsumerStatus(pOldConsumer); // the re-balance is triggered when the new consumer is launched. - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update add, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics), (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics)); - } else if (pNewConsumer->updateType == CONSUMER_UPDATE__REMOVE) { + } else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB_MODIFY_REMOVE) { char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0); // remove from removed topic @@ -1067,10 +1059,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, int32_t status = pOldConsumer->status; updateConsumerStatus(pOldConsumer); - pOldConsumer->rebalanceTime = pNewConsumer->upTime; + pOldConsumer->rebalanceTime = taosGetTimestampMs(); atomic_add_fetch_32(&pOldConsumer->epoch, 1); - mDebug("consumer:0x%" PRIx64 " state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 + mInfo("consumer:0x%" PRIx64 " reb update remove, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64 ", current topics:%d, newTopics:%d, removeTopics:%d", pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status, mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime, @@ -1133,8 +1125,12 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock 
* int32_t cols = 0; // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, pConsumer->consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->consumerId, false); + colDataSetVal(pColInfo, numOfRows, (const char *)consumerIdHex, false); // consumer group char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0}; @@ -1175,7 +1171,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * // up time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->upTime, false); + colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->createTime, false); // subscribe time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1190,7 +1186,7 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal); char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0}; - sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%d,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); + sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf); varDataSetLen(parasStr, strlen(varDataVal(parasStr))); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); @@ -1216,10 +1212,9 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter) { static const char *mndConsumerStatusName(int status) { switch (status) { - case MQ_CONSUMER_STATUS__READY: + case MQ_CONSUMER_STATUS_READY: return "ready"; - case MQ_CONSUMER_STATUS__LOST: - case MQ_CONSUMER_STATUS__LOST_REBD: + case MQ_CONSUMER_STATUS_LOST: return "lost"; case MQ_CONSUMER_STATUS_REBALANCE: return "rebalancing"; diff --git 
a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index 5e5d52b310..a8a719edda 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -223,7 +223,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { return (void *)buf; } -SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]) { +SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj)); if (pConsumer == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -254,16 +254,20 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_L return NULL; } - pConsumer->upTime = taosGetTimestampMs(); + pConsumer->createTime = taosGetTimestampMs(); return pConsumer; } -void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer) { +void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) { + if(pConsumer == NULL) return; taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree); + if(delete){ + taosMemoryFree(pConsumer); + } } int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { @@ -278,7 +282,7 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) { tlen += taosEncodeFixedI32(buf, pConsumer->pid); tlen += taosEncodeSEpSet(buf, &pConsumer->ep); - tlen += taosEncodeFixedI64(buf, pConsumer->upTime); + tlen += taosEncodeFixedI64(buf, pConsumer->createTime); tlen += taosEncodeFixedI64(buf, pConsumer->subscribeTime); tlen += taosEncodeFixedI64(buf, pConsumer->rebalanceTime); @@ -348,7 +352,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s buf = taosDecodeFixedI32(buf, &pConsumer->pid); buf = 
taosDecodeSEpSet(buf, &pConsumer->ep); - buf = taosDecodeFixedI64(buf, &pConsumer->upTime); + buf = taosDecodeFixedI64(buf, &pConsumer->createTime); buf = taosDecodeFixedI64(buf, &pConsumer->subscribeTime); buf = taosDecodeFixedI64(buf, &pConsumer->rebalanceTime); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 5482f36940..3c2335a6ee 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -233,7 +233,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } code = -1; - taosIp2String(pReq->info.conn.clientIp, ip); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CONNECT) != 0) { mGError("user:%s, failed to login from %s since %s", pReq->info.conn.user, ip, terrstr()); @@ -271,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { } } +_CONNECT: pConn = mndCreateConn(pMnode, pReq->info.conn.user, connReq.connType, pReq->info.conn.clientIp, pReq->info.conn.clientPort, connReq.pid, connReq.app, connReq.startTime); if (pConn == NULL) { @@ -842,7 +842,7 @@ static int32_t packQueriesIntoBlock(SShowObj* pShow, SConnObj* pConn, SSDataBloc } varDataLen(subStatus) = strlen(&subStatus[VARSTR_HEADER_SIZE]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, curRowIndex, subStatus, false); + colDataSetVal(pColInfo, curRowIndex, subStatus, (varDataLen(subStatus) == 0) ? 
true : false); char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0}; STR_TO_VARSTR(sql, pQuery->sql); diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 61691a30d5..7ecd994b5a 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -160,10 +160,10 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub, const SMqRebOutputVg *pRebVg, SSubplan* pPlan) { -// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { -// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; -// return -1; -// } + if (pRebVg->oldConsumerId == pRebVg->newConsumerId) { + terrno = TSDB_CODE_MND_INVALID_SUB_OPTION; + return -1; + } void *buf; int32_t tlen; @@ -175,7 +175,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubsc SVgObj *pVgObj = mndAcquireVgroup(pMnode, vgId); if (pVgObj == NULL) { taosMemoryFree(buf); - terrno = TSDB_CODE_OUT_OF_MEMORY; + terrno = TSDB_CODE_MND_VGROUP_NOT_EXIST; return -1; } @@ -296,17 +296,17 @@ static void addUnassignedVgroups(SMqRebOutputObj *pOutput, SHashObj *pHash) { } } -static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ - for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ - SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); - SMqRebOutputVg outputVg = { - .oldConsumerId = pConsumerEp->consumerId, - .newConsumerId = pConsumerEp->consumerId, - .pVgEp = pVgEp, - }; - taosArrayPush(pOutput->rebVgs, &outputVg); - } -} +//static void putNoTransferToOutput(SMqRebOutputObj *pOutput, SMqConsumerEp *pConsumerEp){ +// for(int i = 0; i < taosArrayGetSize(pConsumerEp->vgs); i++){ +// SMqVgEp *pVgEp = (SMqVgEp *)taosArrayGetP(pConsumerEp->vgs, i); +// SMqRebOutputVg outputVg = { +// .oldConsumerId = pConsumerEp->consumerId, +// .newConsumerId = 
pConsumerEp->consumerId, +// .pVgEp = pVgEp, +// }; +// taosArrayPush(pOutput->rebVgs, &outputVg); +// } +//} static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, int32_t minVgCnt, int32_t imbConsumerNum) { @@ -357,7 +357,7 @@ static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHas } } } - putNoTransferToOutput(pOutput, pConsumerEp); +// putNoTransferToOutput(pOutput, pConsumerEp); } } @@ -468,40 +468,51 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR } } - if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed +// if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key); // put all offset rows if (pSub) { taosRLockLatch(&pSub->lock); - bool init = false; if (pOutput->pSub->offsetRows == NULL) { pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows)); - init = true; } pIter = NULL; while (1) { pIter = taosHashIterate(pSub->consumerHash, pIter); if (pIter == NULL) break; SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter; - if (init) { - taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows); -// mDebug("pSub->offsetRows is init"); - } else { - for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { - OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j); - for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { - OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); - if (d1->vgId == d2->vgId) { - d2->rows += d1->rows; - d2->offset = d1->offset; -// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); - } + SMqConsumerEp *pConsumerEpNew = taosHashGet(pOutput->pSub->consumerHash, &pConsumerEp->consumerId, sizeof(int64_t)); + + for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) { + OffsetRows *d1 = 
taosArrayGet(pConsumerEp->offsetRows, j); + bool jump = false; + for (int i = 0; pConsumerEpNew && i < taosArrayGetSize(pConsumerEpNew->vgs); i++){ + SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); + if(pVgEp->vgId == d1->vgId){ + jump = true; + mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + break; } } + if(jump) continue; + bool find = false; + for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) { + OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i); + if (d1->vgId == d2->vgId) { + d2->rows += d1->rows; + d2->offset = d1->offset; + find = true; + mInfo("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows); + break; + } + } + if(!find){ + taosArrayPush(pOutput->pSub->offsetRows, d1); + } } } taosRUnLockLatch(&pSub->lock); mndReleaseSubscribe(pMnode, pSub); - } +// } } // 8. generate logs @@ -576,50 +587,44 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu return -1; } + char topic[TSDB_TOPIC_FNAME_LEN] = {0}; + char cgroup[TSDB_CGROUP_LEN] = {0}; + mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); + // 3. 
commit log: consumer to update status and epoch // 3.1 set touched consumer int32_t consumerNum = taosArrayGetSize(pOutput->modifyConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->modifyConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__TOUCH; - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_NOTOPIC; if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.2 set new consumer consumerNum = taosArrayGetSize(pOutput->newConsumers); for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->newConsumers, i); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_TOPIC; - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__ADD; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebNewTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebNewTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - 
tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 3.3 set removed consumer @@ -627,24 +632,19 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu for (int32_t i = 0; i < consumerNum; i++) { int64_t consumerId = *(int64_t *)taosArrayGet(pOutput->removedConsumers, i); - SMqConsumerObj *pConsumerOld = mndAcquireConsumer(pMnode, consumerId); - SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(pConsumerOld->consumerId, pConsumerOld->cgroup); - pConsumerNew->updateType = CONSUMER_UPDATE__REMOVE; - char *topic = taosMemoryCalloc(1, TSDB_TOPIC_FNAME_LEN); - char cgroup[TSDB_CGROUP_LEN]; - mndSplitSubscribeKey(pOutput->pSub->key, topic, cgroup, true); - taosArrayPush(pConsumerNew->rebRemovedTopics, &topic); - mndReleaseConsumer(pMnode, pConsumerOld); + SMqConsumerObj *pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup); + pConsumerNew->updateType = CONSUMER_UPDATE_REB_MODIFY_REMOVE; + + char* topicTmp = taosStrdup(topic); + taosArrayPush(pConsumerNew->rebRemovedTopics, &topicTmp); if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) { - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); mndTransDrop(pTrans); return -1; } - tDeleteSMqConsumerObj(pConsumerNew); - taosMemoryFree(pConsumerNew); + tDeleteSMqConsumerObj(pConsumerNew, true); } // 4. 
TODO commit log: modification log @@ -771,8 +771,10 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { } static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { - SMnode *pMnode = pMsg->info.node; - SMDropCgroupReq dropReq = {0}; + SMnode *pMnode = pMsg->info.node; + SMDropCgroupReq dropReq = {0}; + STrans *pTrans = NULL; + int32_t code = TSDB_CODE_ACTION_IN_PROGRESS; if (tDeserializeSMDropCgroupReq(pMsg->pCont, pMsg->contLen, &dropReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -791,38 +793,54 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) { } } + taosWLockLatch(&pSub->lock); if (taosHashGetSize(pSub->consumerHash) != 0) { terrno = TSDB_CODE_MND_CGROUP_USED; mError("cgroup:%s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - return -1; + code = -1; + goto end; } - STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); + void *pIter = NULL; + SMqConsumerObj *pConsumer; + while (1) { + pIter = sdbFetch(pMnode->pSdb, SDB_CONSUMER, pIter, (void **)&pConsumer); + if (pIter == NULL) { + break; + } + + if (strcmp(dropReq.cgroup, pConsumer->cgroup) == 0) { + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + } + sdbRelease(pMnode->pSdb, pConsumer); + } + + pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "drop-cgroup"); if (pTrans == NULL) { mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic); if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) { mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr()); - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } if 
(mndTransPrepare(pMnode, pTrans) < 0) { - mndReleaseSubscribe(pMnode, pSub); - mndTransDrop(pTrans); - return -1; + code = -1; + goto end; } - mndReleaseSubscribe(pMnode, pSub); - return TSDB_CODE_ACTION_IN_PROGRESS; +end: + taosWUnLockLatch(&pSub->lock); + mndReleaseSubscribe(pMnode, pSub); + mndTransDrop(pTrans); + + return code; } void mndCleanupSubscribe(SMnode *pMnode) {} @@ -989,6 +1007,32 @@ SMqSubscribeObj *mndAcquireSubscribeByKey(SMnode *pMnode, const char *key) { return pSub; } +int32_t mndGetGroupNumByTopic(SMnode *pMnode, const char *topicName) { + int32_t num = 0; + SSdb *pSdb = pMnode->pSdb; + + void *pIter = NULL; + SMqSubscribeObj *pSub = NULL; + while (1) { + pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub); + if (pIter == NULL) break; + + + char topic[TSDB_TOPIC_FNAME_LEN]; + char cgroup[TSDB_CGROUP_LEN]; + mndSplitSubscribeKey(pSub->key, topic, cgroup, true); + if (strcmp(topic, topicName) != 0) { + sdbRelease(pSdb, pSub); + continue; + } + + num++; + sdbRelease(pSdb, pSub); + } + + return num; +} + void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub) { SSdb *pSdb = pMnode->pSdb; sdbRelease(pSdb, pSub); @@ -1114,9 +1158,13 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false); // consumer id + char consumerIdHex[32] = {0}; + sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, consumerId); + varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex))); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1); - + colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1); + mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic), consumerId, varDataVal(cgroup), pVgEp->vgId); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 
4bbe531bf8..485823edf3 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -569,6 +569,11 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) { SMqTopicObj *pTopic = NULL; SDbObj *pDb = NULL; SCMCreateTopicReq createTopicReq = {0}; + if (sdbGetSize(pMnode->pSdb, SDB_TOPIC) >= tmqMaxTopicNum){ + terrno = TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE; + mError("topic num out of range"); + return code; + } if (tDeserializeSCMCreateTopicReq(pReq->pCont, pReq->contLen, &createTopicReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; @@ -681,7 +686,11 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { break; } - if (pConsumer->status == MQ_CONSUMER_STATUS__LOST_REBD) continue; + if (pConsumer->status == MQ_CONSUMER_STATUS_LOST){ + mndDropConsumerFromSdb(pMnode, pConsumer->consumerId); + mndReleaseConsumer(pMnode, pConsumer); + continue; + } int32_t sz = taosArrayGetSize(pConsumer->assignedTopics); for (int32_t i = 0; i < sz; i++) { diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 54e89ee269..cb4b3231f6 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -1980,6 +1980,11 @@ static int metaUpdateTtl(SMeta *pMeta, const SMetaEntry *pME) { int metaUpdateChangeTime(SMeta *pMeta, tb_uid_t uid, int64_t changeTimeMs) { if (!tsTtlChangeOnWrite) return 0; + if (changeTimeMs <= 0) { + metaWarn("Skip to change ttl deletetion time on write, uid: %" PRId64, uid); + return TSDB_CODE_VERSION_NOT_COMPATIBLE; + } + STtlUpdCtimeCtx ctx = {.uid = uid, .changeTimeMs = changeTimeMs}; return ttlMgrUpdateChangeTime(pMeta->pTtlMgr, &ctx); diff --git a/source/dnode/vnode/src/meta/metaTtl.c b/source/dnode/vnode/src/meta/metaTtl.c index c283472c24..af4827a9c7 100644 --- a/source/dnode/vnode/src/meta/metaTtl.c +++ b/source/dnode/vnode/src/meta/metaTtl.c @@ -358,7 +358,8 @@ int ttlMgrFlush(STtlManger *pTtlMgr, TXN *pTxn) { STtlCacheEntry *cacheEntry = 
taosHashGet(pTtlMgr->pTtlCache, pUid, sizeof(*pUid)); if (cacheEntry == NULL) { - metaError("ttlMgr flush failed to get ttl cache since %s", tstrerror(terrno)); + metaError("ttlMgr flush failed to get ttl cache since %s, uid: %" PRId64 ", type: %d", tstrerror(terrno), *pUid, + pEntry->type); goto _out; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index e562264711..ef4ab9242f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -388,7 +388,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData); while (pReader->nextBlk < numOfBlocks) { - tqDebug("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, + tqTrace("tq reader next data block %d/%d, len:%d %" PRId64 " %d", pReader->nextBlk, numOfBlocks, pReader->msg.msgLen, pReader->msg.ver, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); @@ -403,7 +403,7 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)); if (ret != NULL) { - tqDebug("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); + tqTrace("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver); SSDataBlock* pRes = NULL; int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL); @@ -412,11 +412,11 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) { } } else { pReader->nextBlk += 1; - tqDebug("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); + tqTrace("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid); } } - qDebug("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id); + qTrace("stream scan return empty, all %d submit blocks consumed, %s", 
numOfBlocks, id); tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE); pReader->msg.msgStr = NULL; @@ -604,7 +604,7 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol } int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) { - tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); + tqTrace("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk++); SSDataBlock* pBlock = pReader->pResBlock; diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index 964d8b105b..b22650d249 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -335,6 +335,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayInit(1, sizeof(STagVal)); if (!tagArray) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); goto _end; } STagVal tagVal = { @@ -350,6 +351,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d tagArray = taosArrayDestroy(tagArray); if (pTag == NULL) { tdDestroySVCreateTbReq(pCreateTbReq); + taosMemoryFreeClear(pCreateTbReq); terrno = TSDB_CODE_OUT_OF_MEMORY; goto _end; } diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index c27aab5b63..40bca57827 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -234,8 +234,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int } } - *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; - pCoder->pos += sizeof(int64_t); + if (!tDecodeIsEnd(pCoder)) { + *(int64_t *)(pCoder->data + pCoder->pos) = ctimeMs; + pCoder->pos += sizeof(int64_t); + } tEndDecode(pCoder); diff --git a/source/libs/executor/src/executor.c 
b/source/libs/executor/src/executor.c index 5f86f195b6..d67088ebe1 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1134,6 +1134,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT SOperatorInfo* pOperator = pTaskInfo->pRoot; const char* id = GET_TASKID(pTaskInfo); + if(subType == TOPIC_SUB_TYPE__COLUMN && pOffset->type == TMQ_OFFSET__LOG){ + pOperator = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id); + if (pOperator == NULL) { + return -1; + } + SStreamScanInfo* pInfo = pOperator->info; + SStoreTqReader* pReaderAPI = &pTaskInfo->storageAPI.tqReaderFn; + SWalReader* pWalReader = pReaderAPI->tqReaderGetWalReader(pInfo->tqReader); + walReaderVerifyOffset(pWalReader, pOffset); + } // if pOffset equal to current offset, means continue consume if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.currentOffset)) { return 0; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index d56fa9de78..bfe4ed0533 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2456,6 +2456,10 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys if (pHandle->vnode) { SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTableListInfo, pTaskInfo); + if (pTableScanOp == NULL) { + qError("createTableScanOperatorInfo error, errorcode: %d", pTaskInfo->code); + goto _error; + } STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info; if (pHandle->version > 0) { pTSInfo->base.cond.endVersion = pHandle->version; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index d447fe261d..21b36d69ec 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2415,6 +2415,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) { } static int32_t 
firstLastTransferInfoImpl(SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) { + if (!pInput->hasResult) { + return TSDB_CODE_FAILED; + } + if (pOutput->hasResult) { if (isFirst) { if (pInput->ts > pOutput->ts) { diff --git a/source/libs/function/src/thistogram.c b/source/libs/function/src/thistogram.c index e7d631f638..b56691f35d 100644 --- a/source/libs/function/src/thistogram.c +++ b/source/libs/function/src/thistogram.c @@ -474,8 +474,8 @@ double* tHistogramUniform(SHistogramInfo* pHisto, double* ratio, int32_t num) { } ASSERTS(total <= numOfElem && total + pHisto->elems[j + 1].num > numOfElem, - "tHistogramUniform Error, total:%d, numOfElem:%d, elems[%d].num:%d", - total, numOfElem, j + 1, pHisto->elems[j + 1].num); + "tHistogramUniform Error, total:%ld, numOfElem:%ld, elems[%d].num:%ld", + total, (int64_t)numOfElem, j + 1, pHisto->elems[j + 1].num); double delta = numOfElem - total; if (fabs(delta) < FLT_EPSILON) { diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 3ec802a7ce..8101b342a4 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -39,6 +39,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) if (p != NULL) { pIdList = *(SArray **)p; } else { + taosMemoryFree(buffer); return NULL; } @@ -48,6 +49,7 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx) SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId); if (pg == NULL) { + taosMemoryFree(buffer); return NULL; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 31a7dfdbc5..5b9f44c812 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -255,6 +255,18 @@ int32_t udfStopUdfd() { return 0; } +int32_t udfGetUdfdPid(int32_t* pUdfdPid) { + SUdfdData *pData = &udfdGlobal; + if (pData->spawnErr) { + return pData->spawnErr; + } + uv_pid_t pid = 
uv_process_get_pid(&pData->process); + if (pUdfdPid) { + *pUdfdPid = (int32_t)pid; + } + return TSDB_CODE_SUCCESS; +} + //============================================================================================== /* Copyright (c) 2013, Ben Noordhuis * The QUEUE is copied from queue.h under libuv diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 3b827a2f99..93259924d5 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -965,40 +965,6 @@ int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) { return code; } -int32_t udfdConnectToMnode() { - SConnectReq connReq = {0}; - connReq.connType = CONN_TYPE__UDFD; - tstrncpy(connReq.app, "udfd", sizeof(connReq.app)); - tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user)); - char pass[TSDB_PASSWORD_LEN + 1] = {0}; - taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass); - tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd)); - connReq.pid = taosGetPId(); - connReq.startTime = taosGetTimestampMs(); - strcpy(connReq.sVer, version); - - int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq); - void *pReq = rpcMallocCont(contLen); - tSerializeSConnectReq(pReq, contLen, &connReq); - - SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo)); - msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT; - uv_sem_init(&msgInfo->resultSem, 0); - - SRpcMsg rpcMsg = {0}; - rpcMsg.msgType = TDMT_MND_CONNECT; - rpcMsg.pCont = pReq; - rpcMsg.contLen = contLen; - rpcMsg.info.ahandle = msgInfo; - rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL); - - uv_sem_wait(&msgInfo->resultSem); - int32_t code = msgInfo->code; - uv_sem_destroy(&msgInfo->resultSem); - taosMemoryFree(msgInfo); - return code; -} - static bool udfdRpcRfp(int32_t code, tmsg_t msgType) { if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_RPC_BROKEN_LINK || code == TSDB_CODE_SYN_NOT_LEADER || code == 
TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED || code == TSDB_CODE_SYN_RESTORING || @@ -1378,23 +1344,6 @@ static int32_t udfdRun() { return 0; } -void udfdConnectMnodeThreadFunc(void *args) { - int32_t retryMnodeTimes = 0; - int32_t code = 0; - while (retryMnodeTimes++ <= TSDB_MAX_REPLICA) { - uv_sleep(100 * (1 << retryMnodeTimes)); - code = udfdConnectToMnode(); - if (code == 0) { - break; - } - fnError("udfd can not connect to mnode, code: %s. retry", tstrerror(code)); - } - - if (code != 0) { - fnError("udfd can not connect to mnode"); - } -} - int32_t udfdInitResidentFuncs() { if (strlen(tsUdfdResFuncs) == 0) { return TSDB_CODE_SUCCESS; @@ -1497,9 +1446,6 @@ int main(int argc, char *argv[]) { udfdInitResidentFuncs(); - uv_thread_t mnodeConnectThread; - uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL); - udfdRun(); removeListeningPipe(); diff --git a/source/libs/geometry/src/geosWrapper.c b/source/libs/geometry/src/geosWrapper.c index dd83083ec9..993178e2b0 100644 --- a/source/libs/geometry/src/geosWrapper.c +++ b/source/libs/geometry/src/geosWrapper.c @@ -173,6 +173,7 @@ int32_t initCtxAsText() { if (geosCtx->WKTWriter) { GEOSWKTWriter_setRoundingPrecision_r(geosCtx->handle, geosCtx->WKTWriter, 6); + GEOSWKTWriter_setTrim_r(geosCtx->handle, geosCtx->WKTWriter, 0); } else { return code; } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 2c12c84081..bfdcd2b030 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -639,6 +639,10 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP ret = indexJsonSearch(arg->ivtIdx, mtm, output->result); indexMultiTermQueryDestroy(mtm); } else { + if (left->colValType == TSDB_DATA_TYPE_GEOMETRY || right->colValType == TSDB_DATA_TYPE_GEOMETRY) { + return TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + } + bool reverse = false, equal = false; FilterFunc filterFunc = sifGetFilterFunc(qtype, &reverse, &equal); diff 
--git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index f9b4e54318..8d35674949 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -331,6 +331,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, int64_t iv; uint64_t uv; char* endptr = NULL; + int32_t code = TSDB_CODE_SUCCESS; if (isNullValue(pSchema->type, pToken)) { if (TSDB_DATA_TYPE_TIMESTAMP == pSchema->type && PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { @@ -467,8 +468,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { // Too long values will raise the invalid sql error message if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); @@ -478,6 +478,30 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, break; } + case TSDB_DATA_TYPE_GEOMETRY: { + unsigned char* output = NULL; + size_t size = 0; + + code = parseGeometry(pToken, &output, &size); + if (code != TSDB_CODE_SUCCESS) { + code = buildSyntaxErrMsg(pMsgBuf, getThreadLocalGeosCtx()->errMsg, pToken->z); + } else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { + // Too long values will raise the invalid sql error message + code = generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); + } else { + val->pData = taosMemoryMalloc(size); + if (NULL == val->pData) { + code = TSDB_CODE_OUT_OF_MEMORY; + } else { + memcpy(val->pData, output, size); + val->nData = size; + } + } + + geosFreeBuffer(output); + break; + } + case TSDB_DATA_TYPE_NCHAR: { int32_t output = 0; void* p = taosMemoryCalloc(1, pSchema->bytes - VARSTR_HEADER_SIZE); @@ -508,7 +532,7 @@ static int32_t parseTagToken(const char** end, SToken* pToken, SSchema* pSchema, } } - return TSDB_CODE_SUCCESS; + return 
code; } // input pStmt->pSql: [(tag1_name, ...)] TAGS (tag1_value, ...) ... @@ -1382,7 +1406,7 @@ static int32_t parseValueTokenImpl(SInsertParseContext* pCxt, const char** pSql, code = buildSyntaxErrMsg(&pCxt->msg, getThreadLocalGeosCtx()->errMsg, pToken->z); } // Too long values will raise the invalid sql error message - else if (size > pSchema->bytes) { + else if (size + VARSTR_HEADER_SIZE > pSchema->bytes) { code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name); } else { diff --git a/source/libs/scalar/CMakeLists.txt b/source/libs/scalar/CMakeLists.txt index 30c68cb512..1fe0f9a18d 100644 --- a/source/libs/scalar/CMakeLists.txt +++ b/source/libs/scalar/CMakeLists.txt @@ -8,13 +8,14 @@ target_include_directories( ) target_link_libraries(scalar - PRIVATE os - PRIVATE util + PRIVATE os + PRIVATE util PRIVATE common PRIVATE nodes PRIVATE function PRIVATE qcom PRIVATE parser + PRIVATE geometry ) if(${BUILD_TEST}) diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h index 1ca8ac1d8c..5fb7b0e90c 100644 --- a/source/libs/scalar/inc/filterInt.h +++ b/source/libs/scalar/inc/filterInt.h @@ -271,8 +271,9 @@ struct SFilterInfo { SFilterPCtx pctx; }; -#define FILTER_NO_MERGE_DATA_TYPE(t) \ - ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON) +#define FILTER_NO_MERGE_DATA_TYPE(t) \ + ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR || (t) == TSDB_DATA_TYPE_JSON || \ + (t) == TSDB_DATA_TYPE_GEOMETRY) #define FILTER_NO_MERGE_OPTR(o) ((o) == OP_TYPE_IS_NULL || (o) == OP_TYPE_IS_NOT_NULL || (o) == FILTER_DUMMY_EMPTY_OPTR) #define MR_EMPTY_RES(ctx) (ctx->rs == NULL) diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index bbefcc6b3a..b3afbb53c1 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -133,7 +133,7 @@ __compar_fn_t gDataCompare[] = { setChkInBytes2, setChkInBytes4, setChkInBytes8, 
comparestrRegexMatch, comparestrRegexNMatch, setChkNotInBytes1, setChkNotInBytes2, setChkNotInBytes4, setChkNotInBytes8, compareChkNotInString, comparestrPatternNMatch, comparewcsPatternNMatch, - comparewcsRegexMatch, comparewcsRegexNMatch, + comparewcsRegexMatch, comparewcsRegexNMatch, compareLenBinaryVal }; __compar_fn_t gInt8SignCompare[] = {compareInt8Val, compareInt8Int16, compareInt8Int32, @@ -257,8 +257,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { case TSDB_DATA_TYPE_DOUBLE: comparFn = 5; break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_GEOMETRY: { + case TSDB_DATA_TYPE_BINARY: { if (optr == OP_TYPE_MATCH) { comparFn = 19; } else if (optr == OP_TYPE_NMATCH) { @@ -297,6 +296,21 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { break; } + case TSDB_DATA_TYPE_GEOMETRY: { + if (optr == OP_TYPE_EQUAL || optr == OP_TYPE_NOT_EQUAL || optr == OP_TYPE_IS_NULL || + optr == OP_TYPE_IS_NOT_NULL) { + comparFn = 30; + } else if (optr == OP_TYPE_IN) { + comparFn = 8; + } else if (optr == OP_TYPE_NOT_IN) { + comparFn = 25; + } else { + terrno = TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR; + return 0; + } + break; + } + case TSDB_DATA_TYPE_UTINYINT: comparFn = 11; break; @@ -1042,12 +1056,12 @@ static FORCE_INLINE int32_t filterAddColFieldFromField(SFilterInfo *info, SFilte int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *fid) { if (node == NULL) { fltDebug("empty node"); - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } if (nodeType(node) != QUERY_NODE_COLUMN && nodeType(node) != QUERY_NODE_VALUE && nodeType(node) != QUERY_NODE_NODE_LIST) { - FLT_ERR_RET(TSDB_CODE_APP_ERROR); + goto _return; } int32_t type; @@ -1063,6 +1077,7 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f filterAddField(info, v, NULL, type, fid, 0, true, NULL); +_return: return TSDB_CODE_SUCCESS; } @@ -1948,33 +1963,15 @@ int32_t fltInitValFieldData(SFilterInfo *info) { } SDataType *dType = 
&var->node.resType; - size_t bytes = 0; - - if (type == TSDB_DATA_TYPE_BINARY) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? dType->bytes - : MAX_NUM_STR_SIZE; - bytes = len + 1 + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else if (type == TSDB_DATA_TYPE_NCHAR) { - size_t len = (dType->type == TSDB_DATA_TYPE_BINARY || dType->type == TSDB_DATA_TYPE_NCHAR) ? dType->bytes - : MAX_NUM_STR_SIZE; - bytes = (len + 1) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; - - fi->data = taosMemoryCalloc(1, bytes); - } else { - fi->data = taosMemoryCalloc(1, sizeof(int64_t)); - } - if (dType->type == type) { + size_t bufBytes = TMAX(dType->bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); assignVal(fi->data, nodesGetValueFromNode(var), dType->bytes, type); } else { SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; out.columnData->info.type = type; out.columnData->info.precision = precision; - if (IS_VAR_DATA_TYPE(type)) { - out.columnData->info.bytes = bytes; - } else { + if (!IS_VAR_DATA_TYPE(type)) { out.columnData->info.bytes = tDataTypes[type].bytes; } @@ -1985,7 +1982,13 @@ int32_t fltInitValFieldData(SFilterInfo *info) { return TSDB_CODE_TSC_INVALID_OPERATION; } - memcpy(fi->data, out.columnData->pData, out.columnData->info.bytes); + size_t bufBytes = IS_VAR_DATA_TYPE(type) ? varDataTLen(out.columnData->pData) + : TMAX(out.columnData->info.bytes, sizeof(int64_t)); + fi->data = taosMemoryCalloc(1, bufBytes); + + size_t valBytes = IS_VAR_DATA_TYPE(type) ? 
varDataTLen(out.columnData->pData) : out.columnData->info.bytes; + memcpy(fi->data, out.columnData->pData, valBytes); + colDataDestroy(out.columnData); taosMemoryFree(out.columnData); } @@ -2751,6 +2754,7 @@ int32_t filterPostProcessRange(SFilterInfo *info) { } int32_t filterGenerateComInfo(SFilterInfo *info) { + terrno = 0; info->cunits = taosMemoryMalloc(info->unitNum * sizeof(*info->cunits)); info->blkUnitRes = taosMemoryMalloc(sizeof(*info->blkUnitRes) * info->unitNum); info->blkUnits = taosMemoryMalloc(sizeof(*info->blkUnits) * (info->unitNum + 1) * info->groupNum); @@ -2758,7 +2762,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { for (uint32_t i = 0; i < info->unitNum; ++i) { SFilterUnit *unit = &info->units[i]; - info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); + info->cunits[i].func = filterGetCompFuncIdx(FILTER_UNIT_DATA_TYPE(unit), unit->compare.optr); // set terrno if err info->cunits[i].rfunc = filterGetRangeCompFuncFromOptrs(unit->compare.optr, unit->compare.optr2); info->cunits[i].optr = FILTER_UNIT_OPTR(unit); info->cunits[i].colData = NULL; @@ -2779,7 +2783,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit); } - return TSDB_CODE_SUCCESS; + return terrno; } int32_t filterUpdateComUnits(SFilterInfo *info) { @@ -3336,6 +3340,7 @@ int32_t filterSetExecFunc(SFilterInfo *info) { } int32_t filterPreprocess(SFilterInfo *info) { + int32_t code = TSDB_CODE_SUCCESS; SFilterGroupCtx **gRes = taosMemoryCalloc(info->groupNum, sizeof(SFilterGroupCtx *)); int32_t gResNum = 0; @@ -3361,7 +3366,7 @@ int32_t filterPreprocess(SFilterInfo *info) { filterRewrite(info, gRes, gResNum); - filterGenerateComInfo(info); + FLT_ERR_JRET(filterGenerateComInfo(info)); _return: @@ -3373,7 +3378,7 @@ _return: taosMemoryFreeClear(gRes); - return TSDB_CODE_SUCCESS; + return code; } int32_t fltSetColFieldDataImpl(SFilterInfo *info, void *param, filer_get_col_from_id fp, bool 
fromColId) { @@ -3741,10 +3746,10 @@ int32_t fltSclBuildRangeFromBlockSma(SFltSclColumnRange *colRange, SColumnDataAg taosArrayPush(points, &startPt); taosArrayPush(points, &endPt); } - SFltSclDatum min; + SFltSclDatum min = {0}; fltSclBuildDatumFromBlockSmaValue(&min, colRange->colNode->node.resType.type, pAgg->min); SFltSclPoint minPt = {.excl = false, .start = true, .val = min}; - SFltSclDatum max; + SFltSclDatum max = {0}; fltSclBuildDatumFromBlockSmaValue(&max, colRange->colNode->node.resType.type, pAgg->max); SFltSclPoint maxPt = {.excl = false, .start = false, .val = max}; taosArrayPush(points, &minPt); @@ -4290,30 +4295,27 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) { return DEAL_RES_ERROR; } + SColumnNode *refNode = (SColumnNode *)node->pLeft; + SExprNode *exprNode = NULL; if (OP_TYPE_IN != node->opType) { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SValueNode *valueNode = (SValueNode *)node->pRight; if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) { valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, valueNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &valueNode->node; } else { - SColumnNode *refNode = (SColumnNode *)node->pLeft; SNodeListNode *listNode = (SNodeListNode *)node->pRight; if (LIST_LENGTH(listNode->pNodeList) > 10) { stat->scalarMode = true; return DEAL_RES_CONTINUE; } - int32_t type = vectorGetConvertType(refNode->node.resType.type, listNode->node.resType.type); - if (0 != type && type != refNode->node.resType.type) { - stat->scalarMode = true; - return DEAL_RES_CONTINUE; - } + exprNode = &listNode->node; + } + int32_t type = vectorGetConvertType(refNode->node.resType.type, exprNode->resType.type); + if (0 != type && type != 
refNode->node.resType.type) { + stat->scalarMode = true; + return DEAL_RES_CONTINUE; } } @@ -4664,7 +4666,7 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData **p, SC code = scalarCalculate(info->sclCtx.node, pList, &output); taosArrayDestroy(pList); - FLT_ERR_RET(code); + FLT_ERR_RET(code); // TODO: current errcode returns as true *p = output.columnData; diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c index b41eba293b..35256d0c96 100644 --- a/source/libs/scalar/src/sclvector.c +++ b/source/libs/scalar/src/sclvector.c @@ -26,6 +26,7 @@ #include "tdataformat.h" #include "ttime.h" #include "ttypes.h" +#include "geosWrapper.h" #define LEFT_COL ((pLeftCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pLeftCol : pLeftCol->pData)) #define RIGHT_COL ((pRightCol->info.type == TSDB_DATA_TYPE_JSON ? (void *)pRightCol : pRightCol->pData)) @@ -378,6 +379,31 @@ static FORCE_INLINE void ncharToVar(char *buf, SScalarParam *pOut, int32_t rowIn taosMemoryFree(t); } +// todo remove this malloc +static FORCE_INLINE void varToGeometry(char *buf, SScalarParam *pOut, int32_t rowIndex, int32_t *overflow) { + //[ToDo] support to parse WKB as well as WKT + unsigned char *t = NULL; + size_t len = 0; + + if (initCtxGeomFromText()) { + sclError("failed to init geometry ctx"); + return; + } + if (doGeomFromText(buf, &t, &len)) { + sclDebug("failed to convert text to geometry"); + return; + } + + char *output = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE); + memcpy(output + VARSTR_HEADER_SIZE, t, len); + varDataSetLen(output, len); + + colDataSetVal(pOut->columnData, rowIndex, output, false); + + taosMemoryFree(output); + geosFreeBuffer(t); +} + // TODO opt performance, tmp is not needed. 
int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { bool vton = false; @@ -401,6 +427,8 @@ int32_t vectorConvertFromVarData(SSclVectorConvCtx *pCtx, int32_t *overflow) { vton = true; } else if (TSDB_DATA_TYPE_TIMESTAMP == pCtx->outType) { func = varToTimestamp; + } else if (TSDB_DATA_TYPE_GEOMETRY == pCtx->outType) { + func = varToGeometry; } else { sclError("invalid convert outType:%d, inType:%d", pCtx->outType, pCtx->inType); return TSDB_CODE_APP_ERROR; @@ -881,7 +909,7 @@ int32_t vectorConvertSingleColImpl(const SScalarParam *pIn, SScalarParam *pOut, } int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { - /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON GEOM VARB DECI BLOB MEDB*/ + /* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB MEDB GEOM*/ /*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 5, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0, 0, 0, /*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 5, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0, 0, 0, @@ -890,7 +918,7 @@ int8_t gConvertTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX] = { /*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 5, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0, 0, 0, /*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0, 0, 0, /*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0, 0, 0, - /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, + /*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0, 0, 20, /*TIME*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 7, 0, 7, 0, 0, 0, 0, /*NCHA*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, /*UTIN*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 14, 0, 7, 0, 0, 0, 0, diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 7840fe2017..aecf3d5d91 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -57,7 +57,7 @@ typedef enum { 
#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ #define SCHEDULE_DEFAULT_MAX_NODE_NUM 20 -#define SCH_DEFAULT_TASK_TIMEOUT_USEC 60000000 +#define SCH_DEFAULT_TASK_TIMEOUT_USEC 5000000 #define SCH_MAX_TASK_TIMEOUT_USEC 300000000 #define SCH_DEFAULT_MAX_RETRY_NUM 6 #define SCH_MIN_AYSNC_EXEC_NUM 3 @@ -239,7 +239,7 @@ typedef struct SSchTask { int32_t lastMsgType; // last sent msg type int64_t timeoutUsec; // task timeout useconds before reschedule SQueryNodeAddr succeedAddr; // task executed success node address - int8_t candidateIdx; // current try condidation index + int32_t candidateIdx; // current try condidation index SArray *candidateAddrs; // condidate node addresses, element is SQueryNodeAddr SHashObj *execNodes; // all tried node for current task, element is SSchNodeInfo SSchTaskProfile profile; // task execution profile diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 78e28bce49..d4ded2dd8b 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -745,7 +745,6 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } - pTask->candidateIdx = 0; pTask->candidateAddrs = taosArrayInit(SCHEDULE_DEFAULT_MAX_NODE_NUM, sizeof(SQueryNodeAddr)); if (NULL == pTask->candidateAddrs) { SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCHEDULE_DEFAULT_MAX_NODE_NUM); @@ -770,6 +769,8 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask)); + pTask->candidateIdx = taosRand() % taosArrayGetSize(pTask->candidateAddrs); + /* for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) { strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i])); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 0799671bce..dd857141c1 100644 --- 
a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -376,7 +376,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, ASSERT(pPos->pRowBuff && pFileState->rowSize > 0); if (streamStateGetBatchSize(batch) >= BATCH_LIMIT) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); streamStateClearBatch(batch); } @@ -390,7 +390,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, taosMemoryFree(buf); if (streamStateGetBatchSize(batch) > 0) { - code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); + streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); } streamStateClearBatch(batch); @@ -407,7 +407,7 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, int32_t len = 0; sprintf(keyBuf, "%s:%" PRId64 "", taskKey, ((SStreamState*)pFileState->pFileStore)->checkPointId); streamFileStateEncode(&pFileState->flushMark, &valBuf, &len); - code = streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); + streamStatePutBatch(pFileState->pFileStore, "default", batch, keyBuf, valBuf, len, 0); taosMemoryFree(valBuf); } { @@ -511,7 +511,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState) { break; } memcpy(pNewPos->pRowBuff, pVal, pVLen); - code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->rowSize, &pNewPos, POINTER_BYTES); + code = tSimpleHashPut(pFileState->rowBuffMap, pNewPos->pKey, pFileState->keyLen, &pNewPos, POINTER_BYTES); if (code != TSDB_CODE_SUCCESS) { destroyRowBuffPos(pNewPos); break; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index a12f8051ba..1e70ce4a1c 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -47,9 +47,7 @@ static FORCE_INLINE int walBuildTmpMetaName(SWal* pWal, char* buf) { } static FORCE_INLINE int64_t 
walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { - int32_t sz = taosArrayGetSize(pWal->fileInfoSet); - terrno = TSDB_CODE_SUCCESS; - + int32_t sz = taosArrayGetSize(pWal->fileInfoSet); SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); char fnameStr[WAL_FILE_LEN]; walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); @@ -74,13 +72,12 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t capacity = 0; int64_t readSize = 0; char* buf = NULL; - bool firstTrial = pFileInfo->fileSize < fileSize; int64_t offset = TMIN(pFileInfo->fileSize, fileSize); - int64_t offsetForward = offset - stepSize + walCkHeadSz - 1; - int64_t offsetBackward = offset; int64_t retVer = -1; int64_t lastEntryBeginOffset = 0; int64_t lastEntryEndOffset = 0; + int64_t recordLen = 0; + bool forwardStage = false; // check recover size if (2 * tsWalFsyncDataSizeLimit + offset < end) { @@ -91,14 +88,8 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // search for the valid last WAL entry, e.g. block by block while (1) { - offset = (firstTrial) ? TMIN(fileSize, offsetForward + stepSize - walCkHeadSz + 1) - : TMAX(0, offsetBackward - stepSize + walCkHeadSz - 1); + offset = (lastEntryEndOffset > 0) ? offset : TMAX(0, offset - stepSize + walCkHeadSz - 1); end = TMIN(offset + stepSize, fileSize); - if (firstTrial) { - offsetForward = offset; - } else { - offsetBackward = offset; - } readSize = end - offset; capacity = readSize + sizeof(magic); @@ -129,7 +120,16 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { int64_t pos = 0; SWalCkHead* logContent = NULL; - while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic))) != NULL) { + while (true) { + forwardStage = (lastEntryEndOffset > 0 || offset == 0); + terrno = TSDB_CODE_SUCCESS; + if (forwardStage) { + candidate = (readSize - (haystack - buf)) > 0 ? 
haystack : NULL; + } else { + candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(magic)); + } + + if (candidate == NULL) break; pos = candidate - buf; // validate head @@ -137,13 +137,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { if (len < walCkHeadSz) { break; } + logContent = (SWalCkHead*)(buf + pos); if (walValidHeadCksum(logContent) != 0) { terrno = TSDB_CODE_WAL_CHKSUM_MISMATCH; wWarn("vgId:%d, failed to validate checksum of wal entry header. offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -151,9 +152,9 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { } // validate body - int64_t size = walCkHeadSz + logContent->head.bodyLen; - if (len < size) { - int64_t extraSize = size - len; + recordLen = walCkHeadSz + logContent->head.bodyLen; + if (len < recordLen) { + int64_t extraSize = recordLen - len; if (capacity < readSize + extraSize + sizeof(magic)) { capacity += extraSize; void* ptr = taosMemoryRealloc(buf, capacity); @@ -184,7 +185,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { wWarn("vgId:%d, failed to validate checksum of wal entry body. 
offset:%" PRId64 ", file:%s", pWal->cfg.vgId, offset + pos, fnameStr); haystack = buf + pos + 1; - if (firstTrial) { + if (forwardStage) { break; } else { continue; @@ -194,21 +195,14 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { // found one retVer = logContent->head.version; lastEntryBeginOffset = offset + pos; - lastEntryEndOffset = offset + pos + sizeof(SWalCkHead) + logContent->head.bodyLen; + lastEntryEndOffset = offset + pos + recordLen; // try next - haystack = buf + pos + 1; + haystack = buf + pos + recordLen; } - if (end == fileSize) firstTrial = false; - if (firstTrial) { - if (terrno == TSDB_CODE_SUCCESS) { - continue; - } else { - firstTrial = false; - } - } - if (retVer >= 0 || offset == 0) break; + offset = (lastEntryEndOffset > 0) ? lastEntryEndOffset : offset; + if (forwardStage && (terrno != TSDB_CODE_SUCCESS || end == fileSize)) break; } if (retVer < 0) { diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c index e32ff3da95..843f9c56dc 100644 --- a/source/util/src/tcompare.c +++ b/source/util/src/tcompare.c @@ -225,6 +225,23 @@ int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight) { return compareLenPrefixedWStr(pRight, pLeft); } +int32_t compareLenBinaryVal(const void *pLeft, const void *pRight) { + int32_t len1 = varDataLen(pLeft); + int32_t len2 = varDataLen(pRight); + + int32_t minLen = TMIN(len1, len2); + int32_t ret = memcmp(varDataVal(pLeft), varDataVal(pRight), minLen); + if (ret == 0) { + if (len1 == len2) { + return 0; + } else { + return len1 > len2 ? 1 : -1; + } + } else { + return ret > 0 ? 
1 : -1; + } +} + // string > number > bool > null // ref: https://dev.mysql.com/doc/refman/8.0/en/json.html#json-comparison int32_t compareJsonVal(const void *pLeft, const void *pRight) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 0a53ece746..d2b9edf753 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -405,6 +405,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR, "Json not support in t TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_GROUP_ERROR, "Json not support in group/partition by") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_NOT_EXIST, "Job not exist") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_QWORKER_QUIT, "Vnode/Qnode is quitting") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR, "Geometry not support in this operator") // grant TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired") @@ -629,7 +630,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is inval TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch") TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed") -TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE, "Topic num out of range") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE, "Group num out of range 100") // stream TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist") diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 90a7f3fe42..21fed2e1f5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -33,6 +33,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py 
+,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py @@ -128,6 +129,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-19201.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3404.py +,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3581.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py @@ -779,7 +781,7 @@ ,,y,script,./test.sh -f tsim/user/basic.sim ,,y,script,./test.sh -f tsim/user/password.sim ,,y,script,./test.sh -f tsim/user/privilege_db.sim -,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim +#,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim ,,y,script,./test.sh -f tsim/user/privilege_topic.sim ,,y,script,./test.sh -f tsim/user/privilege_table.sim ,,y,script,./test.sh -f tsim/db/alter_option.sim diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index e539f11531..fbf9d50c25 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -8,6 +8,9 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 system sh/exec.sh -n dnode1 -s start sql connect +sql alter user root pass 'taosdata2' +system sh/exec.sh -n dnode1 -s stop +system sh/exec.sh -n dnode1 -s start print ======== step1 udf system sh/compile_udf.sh diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py index 8a24148064..f7e4c61c9c 100644 --- a/tests/system-test/7-tmq/checkOffsetRowParams.py +++ b/tests/system-test/7-tmq/checkOffsetRowParams.py @@ -245,7 +245,7 @@ class TDTestCase: tdSql.query("show consumers") tdSql.checkRows(1) - tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000,reset:earliest") + tdSql.checkData(0, 8, 
"tbname:1,commit:1,interval:2000ms,reset:earliest") time.sleep(2) tdLog.info("start insert data") diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py new file mode 100644 index 0000000000..f48eaa84d4 --- /dev/null +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -0,0 +1,178 @@ + +import sys +import time +import threading +from taos.tmq import Consumer +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +sys.path.append("./7-tmq") +from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'debugFlag': 135} + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.wal_retention_period1 = 3600 + self.wal_retention_period2 = 1 + self.commit_value_list = ["true", "false"] + self.offset_value_list = ["", "earliest", "latest", "none"] + self.tbname_value_list = ["true", "false"] + self.snapshot_value_list = ["true", "false"] + + # self.commit_value_list = ["true"] + # self.offset_value_list = ["none"] + # self.tbname_value_list = ["true"] + # self.snapshot_value_list = ["true"] + + def tmqParamsTest(self): + paraDict = {'dbName': 'db1', + 'dropFlag': 1, + 'vgroups': 4, + 'stbName': 'stb', + 'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}], + 'ctbPrefix': 'ctb', + 'ctbNum': 1, + 'rowsPerTbl': 10000, + 'batchNum': 10, + 'startTs': 1640966400000, # 2022-01-01 00:00:00.000 + 'auto_commit_interval': "100"} + + + start_group_id = 1 + for snapshot_value in self.snapshot_value_list: + for commit_value in self.commit_value_list: + for offset_value in self.offset_value_list: + for tbname_value in self.tbname_value_list: + topic_name = 'topic1' + tmqCom.initConsumerTable() + tdCom.create_database(tdSql, 
paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) + tdLog.info("create stb") + tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema']) + tdLog.info("create ctb") + tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) + tdLog.info("insert data") + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) + + + tdLog.info("create topics from stb with filter") + queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) + sqlString = "create topic %s as %s" %(topic_name, queryString) + tdSql.query(f'select * from information_schema.ins_databases') + db_wal_retention_period_list = list(map(lambda x:x[-8] if x[0] == paraDict['dbName'] else None, tdSql.queryResult)) + for i in range(len(db_wal_retention_period_list)): + if db_wal_retention_period_list[0] is None or db_wal_retention_period_list[-1] is None: + db_wal_retention_period_list.remove(None) + if snapshot_value =="true": + if db_wal_retention_period_list[0] != self.wal_retention_period2: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period2}") + time.sleep(self.wal_retention_period2+1) + tdSql.execute(f'flush database {paraDict["dbName"]}') + else: + if db_wal_retention_period_list[0] != self.wal_retention_period1: + tdSql.execute(f"alter database {paraDict['dbName']} wal_retention_period {self.wal_retention_period1}") + tdLog.info("create topic sql: %s"%sqlString) + tdSql.execute(sqlString) + tdSql.query(queryString) + expected_res = tdSql.queryRows + group_id = "csm_" + str(start_group_id) + consumer_dict = { + "group.id": group_id, + "td.connect.user": "root", + 
"td.connect.pass": "taosdata", + "auto.commit.interval.ms": paraDict["auto_commit_interval"], + "enable.auto.commit": commit_value, + "auto.offset.reset": offset_value, + "experimental.snapshot.enable": snapshot_value, + "msg.with.table.name": tbname_value + } + consumer_commit = 1 if consumer_dict["enable.auto.commit"] == "true" else 0 + consumer_tbname = 1 if consumer_dict["msg.with.table.name"] == "true" else 0 + consumer_ret = "earliest" if offset_value == "" else offset_value + expected_parameters=f'tbname:{consumer_tbname},commit:{consumer_commit},interval:{paraDict["auto_commit_interval"]}ms,reset:{consumer_ret}' + if len(offset_value) == 0: + del consumer_dict["auto.offset.reset"] + consumer = Consumer(consumer_dict) + consumer.subscribe([topic_name]) + tdLog.info(f"enable.auto.commit: {commit_value}, auto.offset.reset: {offset_value}, experimental.snapshot.enable: {snapshot_value}, msg.with.table.name: {tbname_value}") + stop_flag = 0 + try: + while True: + res = consumer.poll(1) + tdSql.query('show consumers;') + consumer_info = tdSql.queryResult[0][-1] + if offset_value == "latest": + if not res and stop_flag == 1: + break + else: + if not res: + break + # err = res.error() + # if err is not None: + # raise err + # val = res.value() + # for block in val: + # print(block.fetchall()) + if offset_value == "latest" and stop_flag == 0: + tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],int(round(time.time()*1000))) + stop_flag = 1 + finally: + consumer.unsubscribe() + consumer.close() + tdSql.checkEqual(consumer_info, expected_parameters) + start_group_id += 1 + tdSql.query('show subscriptions;') + subscription_info = tdSql.queryResult + if snapshot_value == "true": + if offset_value != "earliest" and offset_value != "": + if offset_value == "latest": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + 
tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + elif offset_value == "none": + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_str = ",".join(list(map(lambda x: x[-2], subscription_info))) + tdSql.checkEqual("tsdb" in offset_value_str, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, [None]*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [None]*len(subscription_info)) + else: + if offset_value != "none": + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0")), subscription_info)) + tdSql.checkEqual(sum(offset_value_list) > 0, True) + rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) + tdSql.checkEqual(sum(rows_value_list), expected_res) + else: + offset_value_list = list(map(lambda x: x[-2], subscription_info)) + tdSql.checkEqual(offset_value_list, ['none']*len(subscription_info)) + rows_value_list = list(map(lambda x: x[-1], subscription_info)) + tdSql.checkEqual(rows_value_list, [0]*len(subscription_info)) + tdSql.execute(f"drop topic if exists {topic_name}") + tdSql.execute(f'drop database if exists {paraDict["dbName"]}') + + def run(self): + self.tmqParamsTest() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) 
+tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/99-TDcase/TS-3581.py b/tests/system-test/99-TDcase/TS-3581.py new file mode 100644 index 0000000000..18488af0a6 --- /dev/null +++ b/tests/system-test/99-TDcase/TS-3581.py @@ -0,0 +1,79 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def create_tables(self): + tdSql.execute(f'''CREATE STABLE `dwd_log_master` (`ts` TIMESTAMP, `dim_ip` NCHAR(64)) TAGS (`group_id` BIGINT, `st_hour` NCHAR(2), `org_id` NCHAR(32), + `dev_manufacturer_name` NCHAR(64), `dev_manufacturer_id` INT, `dev_category_name` NCHAR(64), `dev_category_id` INT, `dev_feature_name` NCHAR(64), + `dev_feature_id` INT, `dev_ip` NCHAR(64), `black_list` TINYINT, `white_list` TINYINT)''') + tdSql.execute(f'''CREATE TABLE `dwd_log_master_475021043` USING `dwd_log_master` (`group_id`, `st_hour`, `org_id`, `dev_manufacturer_name`, `dev_manufacturer_id`, + `dev_category_name`, `dev_category_id`, `dev_feature_name`, `dev_feature_id`, `dev_ip`, `black_list`, `white_list`) TAGS + (475021043, "14", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
"172.18.22.230", NULL, NULL)''') + + def insert_data(self): + tdLog.debug("start to insert data ............") + + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:30.000','192.168.192.102')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:31.000','172.18.23.249')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:32.000','192.168.200.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:33.000','172.18.22.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:34.000','192.168.210.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:35.000','192.168.192.100')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:36.000','192.168.192.231')") + tdSql.execute(f"INSERT INTO `dwd_log_master_475021043` VALUES ('2023-06-26 14:38:37.000','172.18.23.231')") + + tdLog.debug("insert data ............ 
[OK]") + + def run(self): + tdSql.prepare() + self.create_tables() + self.insert_data() + tdLog.printNoPrefix("======== test TS-3581") + + for i in range(100): + tdSql.query(f"select first(ts), last(ts), count(*) from dwd_log_master;") + tdSql.checkRows(1) + print(tdSql.queryResult) + tdSql.checkData(0, 0, '2023-06-26 14:38:30.000') + return + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c index d8920cb4c3..af7f13c69c 100644 --- a/tools/shell/src/shellWebsocket.c +++ b/tools/shell/src/shellWebsocket.c @@ -17,6 +17,9 @@ #include #include +// save current database name +char curDBName[128] = ""; // TDB_MAX_DBNAME_LEN is 24, put large + int shell_conn_ws_server(bool first) { char cuttedDsn[SHELL_WS_DSN_BUFF] = {0}; int dsnLen = strlen(shell.args.dsn); @@ -59,6 +62,14 @@ int shell_conn_ws_server(bool first) { fprintf(stdout, "successfully connected to cloud service\n"); } fflush(stdout); + + // switch to current database if have + if(curDBName[0] !=0) { + char command[256]; + sprintf(command, "use %s;", curDBName); + shellRunSingleCommandWebsocketImp(command); + } + return 0; } @@ -290,7 +301,46 @@ void shellRunSingleCommandWebsocketImp(char *command) { if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { - fprintf(stdout, "Database changed.\r\n\r\n"); + + // copy dbname to curDBName + char *p = command; + bool firstStart = false; + bool firstEnd = false; + int i = 0; + while (*p != 0) { + if (*p != ' ') { + // not blank + if (!firstStart) { + firstStart = true; + } else if (firstEnd) { + if(*p == ';' && *p != '\\') { + break; + } + // database name + curDBName[i++] = *p; + if(i + 4 > sizeof(curDBName)) { + // DBName is too long, reset zero and break + i = 0; + break; + } + } + } else { + // blank + if(firstStart == 
true && firstEnd == false){ + firstEnd = true; + } + if(firstStart && firstEnd && i > 0){ + // blank after database name + break; + } + } + // move next + p++; + } + // append end + curDBName[i] = 0; + + fprintf(stdout, "Database changed to %s.\r\n\r\n", curDBName); fflush(stdout); ws_free_result(res); return;